From a78728a64aca1c55d2c116194a6adadccb78ba5e Mon Sep 17 00:00:00 2001
From: Shiva Krishna Merla
Date: Thu, 24 Oct 2024 08:14:54 -0700
Subject: [PATCH] Remove deprecated rbac proxy container for metrics (#192)

* Deprecate kube-rbac-proxy and use controller-runtime's built-in feature
  for authn/authz for metrics

  Reference: https://book.kubebuilder.io/reference/metrics

  Signed-off-by: Shiva Krishna, Merla

* vendor dependencies

  Signed-off-by: Shiva Krishna, Merla

---------

Signed-off-by: Shiva Krishna, Merla
---
(A sketch of the metrics wiring this change switches to follows the file
summary below.)

 cmd/main.go | 8 +- .../templates/deployment.yaml | 26 +- .../templates/manager-rbac.yaml | 13 + .../templates/proxy-rbac.yaml | 40 - deployments/helm/k8s-nim-operator/values.yaml | 25 +- go.mod | 69 +- go.sum | 141 +- .../github.com/antlr4-go/antlr/v4/.gitignore | 18 + vendor/github.com/antlr4-go/antlr/v4/LICENSE | 28 + .../github.com/antlr4-go/antlr/v4/README.md | 54 + .../github.com/antlr4-go/antlr/v4/antlrdoc.go | 102 + vendor/github.com/antlr4-go/antlr/v4/atn.go | 177 + .../antlr4-go/antlr/v4/atn_config.go | 332 + .../antlr4-go/antlr/v4/atn_config_set.go | 301 + .../antlr/v4/atn_deserialization_options.go | 62 + .../antlr4-go/antlr/v4/atn_deserializer.go | 684 ++ .../antlr4-go/antlr/v4/atn_simulator.go | 41 + .../antlr4-go/antlr/v4/atn_state.go | 461 + .../github.com/antlr4-go/antlr/v4/atn_type.go | 11 + .../antlr4-go/antlr/v4/char_stream.go | 12 + .../antlr/v4/common_token_factory.go | 56 + .../antlr4-go/antlr/v4/common_token_stream.go | 450 + .../antlr4-go/antlr/v4/comparators.go | 150 + .../antlr4-go/antlr/v4/configuration.go | 214 + vendor/github.com/antlr4-go/antlr/v4/dfa.go | 175 + .../antlr4-go/antlr/v4/dfa_serializer.go | 158 + .../antlr4-go/antlr/v4/dfa_state.go | 170 + .../antlr/v4/diagnostic_error_listener.go | 110 + .../antlr4-go/antlr/v4/error_listener.go | 100 + .../antlr4-go/antlr/v4/error_strategy.go | 702 ++ .../github.com/antlr4-go/antlr/v4/errors.go | 259 + .../antlr4-go/antlr/v4/file_stream.go | 67 + .../antlr4-go/antlr/v4/input_stream.go | 157 + .../antlr4-go/antlr/v4/int_stream.go | 16 + .../antlr4-go/antlr/v4/interval_set.go | 330 + .../github.com/antlr4-go/antlr/v4/jcollect.go | 684 ++ vendor/github.com/antlr4-go/antlr/v4/lexer.go | 426 + .../antlr4-go/antlr/v4/lexer_action.go | 452 + .../antlr/v4/lexer_action_executor.go | 173 + .../antlr4-go/antlr/v4/lexer_atn_simulator.go | 677 ++ .../antlr4-go/antlr/v4/ll1_analyzer.go | 219 + vendor/github.com/antlr4-go/antlr/v4/mutex.go | 41 + .../antlr4-go/antlr/v4/mutex_nomutex.go | 32 + .../antlr4-go/antlr/v4/nostatistics.go | 47 + .../github.com/antlr4-go/antlr/v4/parser.go | 700 ++ .../antlr/v4/parser_atn_simulator.go | 1666 +++ .../antlr4-go/antlr/v4/parser_rule_context.go | 421 + .../antlr4-go/antlr/v4/prediction_context.go | 727 ++ .../antlr/v4/prediction_context_cache.go | 48 + .../antlr4-go/antlr/v4/prediction_mode.go | 536 + .../antlr4-go/antlr/v4/recognizer.go | 241 + .../antlr4-go/antlr/v4/rule_context.go | 40 + .../antlr4-go/antlr/v4/semantic_context.go | 464 + .../antlr4-go/antlr/v4/statistics.go | 280 + .../antlr4-go/antlr/v4/stats_data.go | 23 + vendor/github.com/antlr4-go/antlr/v4/token.go | 213 + .../antlr4-go/antlr/v4/token_source.go | 17 + .../antlr4-go/antlr/v4/token_stream.go | 21 + .../antlr/v4/tokenstream_rewriter.go | 662 ++ .../antlr4-go/antlr/v4/trace_listener.go | 32 + .../antlr4-go/antlr/v4/transition.go | 439 + vendor/github.com/antlr4-go/antlr/v4/tree.go | 304 + vendor/github.com/antlr4-go/antlr/v4/trees.go | 142 + vendor/github.com/antlr4-go/antlr/v4/utils.go | 381 + 
.../github.com/cenkalti/backoff/v4/.gitignore | 25 + vendor/github.com/cenkalti/backoff/v4/LICENSE | 20 + .../github.com/cenkalti/backoff/v4/README.md | 30 + .../github.com/cenkalti/backoff/v4/backoff.go | 66 + .../github.com/cenkalti/backoff/v4/context.go | 62 + .../cenkalti/backoff/v4/exponential.go | 216 + .../github.com/cenkalti/backoff/v4/retry.go | 146 + .../github.com/cenkalti/backoff/v4/ticker.go | 97 + .../github.com/cenkalti/backoff/v4/timer.go | 35 + .../github.com/cenkalti/backoff/v4/tries.go | 38 + .../emicklei/go-restful/v3/CHANGES.md | 20 +- .../emicklei/go-restful/v3/README.md | 1 - .../emicklei/go-restful/v3/compress.go | 10 + .../emicklei/go-restful/v3/curly.go | 48 +- .../go-restful/v3/entity_accessors.go | 12 +- .../emicklei/go-restful/v3/jsr311.go | 2 +- .../go-openapi/jsonpointer/pointer.go | 2 +- .../github.com/go-openapi/swag/BENCHMARK.md | 52 + .../go-openapi/swag/initialism_index.go | 139 +- .../github.com/go-openapi/swag/name_lexem.go | 70 +- vendor/github.com/go-openapi/swag/split.go | 470 +- .../go-openapi/swag/string_bytes.go | 8 + vendor/github.com/go-openapi/swag/util.go | 198 +- vendor/github.com/go-openapi/swag/yaml.go | 3 +- vendor/github.com/google/cel-go/LICENSE | 233 + .../github.com/google/cel-go/cel/BUILD.bazel | 91 + vendor/github.com/google/cel-go/cel/cel.go | 19 + vendor/github.com/google/cel-go/cel/decls.go | 355 + vendor/github.com/google/cel-go/cel/env.go | 888 ++ .../github.com/google/cel-go/cel/folding.go | 559 + .../github.com/google/cel-go/cel/inlining.go | 228 + vendor/github.com/google/cel-go/cel/io.go | 252 + .../github.com/google/cel-go/cel/library.go | 790 ++ vendor/github.com/google/cel-go/cel/macro.go | 576 ++ .../github.com/google/cel-go/cel/optimizer.go | 525 + .../github.com/google/cel-go/cel/options.go | 667 ++ .../github.com/google/cel-go/cel/program.go | 545 + .../github.com/google/cel-go/cel/validator.go | 375 + .../google/cel-go/checker/BUILD.bazel | 64 + .../google/cel-go/checker/checker.go | 696 ++ .../github.com/google/cel-go/checker/cost.go | 705 ++ .../google/cel-go/checker/decls/BUILD.bazel | 19 + .../google/cel-go/checker/decls/decls.go | 237 + .../github.com/google/cel-go/checker/env.go | 284 + .../google/cel-go/checker/errors.go | 88 + .../google/cel-go/checker/format.go | 216 + .../google/cel-go/checker/mapping.go | 49 + .../google/cel-go/checker/options.go | 42 + .../google/cel-go/checker/printer.go | 74 + .../google/cel-go/checker/scopes.go | 147 + .../github.com/google/cel-go/checker/types.go | 314 + .../google/cel-go/common/BUILD.bazel | 34 + .../google/cel-go/common/ast/BUILD.bazel | 54 + .../google/cel-go/common/ast/ast.go | 457 + .../google/cel-go/common/ast/conversion.go | 632 ++ .../google/cel-go/common/ast/expr.go | 860 ++ .../google/cel-go/common/ast/factory.go | 303 + .../google/cel-go/common/ast/navigable.go | 652 ++ .../cel-go/common/containers/BUILD.bazel | 31 + .../cel-go/common/containers/container.go | 316 + .../github.com/google/cel-go/common/cost.go | 40 + .../google/cel-go/common/debug/BUILD.bazel | 20 + .../google/cel-go/common/debug/debug.go | 309 + .../google/cel-go/common/decls/BUILD.bazel | 39 + .../google/cel-go/common/decls/decls.go | 846 ++ vendor/github.com/google/cel-go/common/doc.go | 17 + .../github.com/google/cel-go/common/error.go | 74 + .../github.com/google/cel-go/common/errors.go | 103 + .../cel-go/common/functions/BUILD.bazel | 17 + .../cel-go/common/functions/functions.go | 61 + .../google/cel-go/common/location.go | 51 + .../cel-go/common/operators/BUILD.bazel | 14 + 
.../cel-go/common/operators/operators.go | 157 + .../cel-go/common/overloads/BUILD.bazel | 14 + .../cel-go/common/overloads/overloads.go | 327 + .../google/cel-go/common/runes/BUILD.bazel | 25 + .../google/cel-go/common/runes/buffer.go | 242 + .../github.com/google/cel-go/common/source.go | 173 + .../google/cel-go/common/stdlib/BUILD.bazel | 23 + .../google/cel-go/common/stdlib/standard.go | 620 ++ .../google/cel-go/common/types/BUILD.bazel | 90 + .../google/cel-go/common/types/any_value.go | 24 + .../google/cel-go/common/types/bool.go | 141 + .../google/cel-go/common/types/bytes.go | 140 + .../google/cel-go/common/types/compare.go | 97 + .../google/cel-go/common/types/doc.go | 17 + .../google/cel-go/common/types/double.go | 211 + .../google/cel-go/common/types/duration.go | 222 + .../google/cel-go/common/types/err.go | 169 + .../google/cel-go/common/types/int.go | 303 + .../google/cel-go/common/types/iterator.go | 55 + .../google/cel-go/common/types/json_value.go | 29 + .../google/cel-go/common/types/list.go | 529 + .../google/cel-go/common/types/map.go | 854 ++ .../google/cel-go/common/types/null.go | 111 + .../google/cel-go/common/types/object.go | 165 + .../google/cel-go/common/types/optional.go | 108 + .../google/cel-go/common/types/overflow.go | 429 + .../google/cel-go/common/types/pb/BUILD.bazel | 53 + .../google/cel-go/common/types/pb/checked.go | 93 + .../google/cel-go/common/types/pb/enum.go | 44 + .../google/cel-go/common/types/pb/equal.go | 206 + .../google/cel-go/common/types/pb/file.go | 202 + .../google/cel-go/common/types/pb/pb.go | 258 + .../google/cel-go/common/types/pb/type.go | 614 ++ .../google/cel-go/common/types/provider.go | 766 ++ .../cel-go/common/types/ref/BUILD.bazel | 20 + .../cel-go/common/types/ref/provider.go | 102 + .../cel-go/common/types/ref/reference.go | 63 + .../google/cel-go/common/types/string.go | 226 + .../google/cel-go/common/types/timestamp.go | 311 + .../cel-go/common/types/traits/BUILD.bazel | 29 + .../cel-go/common/types/traits/comparer.go | 33 + .../cel-go/common/types/traits/container.go | 23 + .../common/types/traits/field_tester.go | 30 + .../cel-go/common/types/traits/indexer.go | 25 + .../cel-go/common/types/traits/iterator.go | 36 + .../cel-go/common/types/traits/lister.go | 33 + .../cel-go/common/types/traits/mapper.go | 33 + .../cel-go/common/types/traits/matcher.go | 23 + .../google/cel-go/common/types/traits/math.go | 62 + .../cel-go/common/types/traits/receiver.go | 24 + .../cel-go/common/types/traits/sizer.go | 25 + .../cel-go/common/types/traits/traits.go | 64 + .../cel-go/common/types/traits/zeroer.go | 21 + .../google/cel-go/common/types/types.go | 823 ++ .../google/cel-go/common/types/uint.go | 256 + .../google/cel-go/common/types/unknown.go | 326 + .../google/cel-go/common/types/util.go | 48 + .../github.com/google/cel-go/ext/BUILD.bazel | 70 + vendor/github.com/google/cel-go/ext/README.md | 669 ++ .../github.com/google/cel-go/ext/bindings.go | 96 + .../github.com/google/cel-go/ext/encoders.go | 87 + .../google/cel-go/ext/formatting.go | 904 ++ vendor/github.com/google/cel-go/ext/guards.go | 63 + vendor/github.com/google/cel-go/ext/lists.go | 94 + vendor/github.com/google/cel-go/ext/math.go | 895 ++ vendor/github.com/google/cel-go/ext/native.go | 752 ++ vendor/github.com/google/cel-go/ext/protos.go | 140 + vendor/github.com/google/cel-go/ext/sets.go | 261 + .../github.com/google/cel-go/ext/strings.go | 765 ++ .../google/cel-go/interpreter/BUILD.bazel | 74 + .../google/cel-go/interpreter/activation.go | 201 + 
.../cel-go/interpreter/attribute_patterns.go | 397 + .../google/cel-go/interpreter/attributes.go | 1436 +++ .../google/cel-go/interpreter/decorators.go | 272 + .../google/cel-go/interpreter/dispatcher.go | 100 + .../google/cel-go/interpreter/evalstate.go | 79 + .../cel-go/interpreter/functions/BUILD.bazel | 17 + .../cel-go/interpreter/functions/functions.go | 39 + .../cel-go/interpreter/interpretable.go | 1264 +++ .../google/cel-go/interpreter/interpreter.go | 185 + .../cel-go/interpreter/optimizations.go | 46 + .../google/cel-go/interpreter/planner.go | 756 ++ .../google/cel-go/interpreter/prune.go | 543 + .../google/cel-go/interpreter/runtimecost.go | 316 + .../google/cel-go/parser/BUILD.bazel | 58 + .../github.com/google/cel-go/parser/errors.go | 43 + .../google/cel-go/parser/gen/BUILD.bazel | 26 + .../google/cel-go/parser/gen/CEL.g4 | 200 + .../google/cel-go/parser/gen/CEL.interp | 99 + .../google/cel-go/parser/gen/CEL.tokens | 64 + .../google/cel-go/parser/gen/CELLexer.interp | 136 + .../google/cel-go/parser/gen/CELLexer.tokens | 64 + .../cel-go/parser/gen/cel_base_listener.go | 219 + .../cel-go/parser/gen/cel_base_visitor.go | 141 + .../google/cel-go/parser/gen/cel_lexer.go | 344 + .../google/cel-go/parser/gen/cel_listener.go | 208 + .../google/cel-go/parser/gen/cel_parser.go | 6274 ++++++++++++ .../google/cel-go/parser/gen/cel_visitor.go | 110 + .../google/cel-go/parser/gen/doc.go | 16 + .../google/cel-go/parser/gen/generate.sh | 35 + .../github.com/google/cel-go/parser/helper.go | 482 + .../github.com/google/cel-go/parser/input.go | 129 + .../github.com/google/cel-go/parser/macro.go | 402 + .../google/cel-go/parser/options.go | 140 + .../github.com/google/cel-go/parser/parser.go | 1012 ++ .../google/cel-go/parser/unescape.go | 237 + .../google/cel-go/parser/unparser.go | 629 ++ .../grpc-ecosystem/grpc-gateway/v2/LICENSE | 27 + .../v2/internal/httprule/BUILD.bazel | 35 + .../v2/internal/httprule/compile.go | 121 + .../grpc-gateway/v2/internal/httprule/fuzz.go | 11 + .../v2/internal/httprule/parse.go | 368 + .../v2/internal/httprule/types.go | 60 + .../grpc-gateway/v2/runtime/BUILD.bazel | 97 + .../grpc-gateway/v2/runtime/context.go | 417 + .../grpc-gateway/v2/runtime/convert.go | 318 + .../grpc-gateway/v2/runtime/doc.go | 5 + .../grpc-gateway/v2/runtime/errors.go | 191 + .../grpc-gateway/v2/runtime/fieldmask.go | 168 + .../grpc-gateway/v2/runtime/handler.go | 247 + .../v2/runtime/marshal_httpbodyproto.go | 32 + .../grpc-gateway/v2/runtime/marshal_json.go | 50 + .../grpc-gateway/v2/runtime/marshal_jsonpb.go | 349 + .../grpc-gateway/v2/runtime/marshal_proto.go | 60 + .../grpc-gateway/v2/runtime/marshaler.go | 50 + .../v2/runtime/marshaler_registry.go | 109 + .../grpc-gateway/v2/runtime/mux.go | 537 + .../grpc-gateway/v2/runtime/pattern.go | 381 + .../grpc-gateway/v2/runtime/proto2_convert.go | 80 + .../grpc-gateway/v2/runtime/query.go | 372 + .../grpc-gateway/v2/utilities/BUILD.bazel | 31 + .../grpc-gateway/v2/utilities/doc.go | 2 + .../grpc-gateway/v2/utilities/pattern.go | 22 + .../v2/utilities/readerfactory.go | 19 + .../v2/utilities/string_array_flag.go | 33 + .../grpc-gateway/v2/utilities/trie.go | 174 + .../klauspost/compress/.goreleaser.yml | 10 +- .../github.com/klauspost/compress/README.md | 62 +- .../klauspost/compress/fse/decompress.go | 2 +- .../klauspost/compress/huff0/decompress.go | 4 +- .../compress/internal/snapref/encode_other.go | 2 +- vendor/github.com/klauspost/compress/s2sx.mod | 2 +- .../klauspost/compress/zstd/blockdec.go | 7 +- 
.../klauspost/compress/zstd/blockenc.go | 20 + .../klauspost/compress/zstd/decodeheader.go | 56 +- .../klauspost/compress/zstd/decoder.go | 2 +- .../klauspost/compress/zstd/dict.go | 31 + .../klauspost/compress/zstd/enc_best.go | 49 +- .../klauspost/compress/zstd/enc_better.go | 45 +- .../klauspost/compress/zstd/enc_dfast.go | 16 +- .../klauspost/compress/zstd/encoder.go | 45 +- .../compress/zstd/encoder_options.go | 6 +- .../klauspost/compress/zstd/framedec.go | 4 +- .../klauspost/compress/zstd/frameenc.go | 2 +- .../compress/zstd/fse_decoder_generic.go | 11 +- .../zstd/internal/xxhash/xxhash_arm64.s | 4 +- .../klauspost/compress/zstd/matchlen_amd64.s | 10 +- .../klauspost/compress/zstd/seqdec_amd64.go | 4 +- .../klauspost/compress/zstd/seqdec_amd64.s | 144 +- .../klauspost/compress/zstd/zstd.go | 4 + .../prometheus/client_golang/NOTICE | 5 - .../internal/github.com/golang/gddo/LICENSE | 27 + .../golang/gddo/httputil/header/header.go | 145 + .../golang/gddo/httputil/negotiate.go | 36 + .../collectors/go_collector_latest.go | 4 +- .../client_golang/prometheus/go_collector.go | 55 +- .../prometheus/go_collector_latest.go | 19 +- .../client_golang/prometheus/histogram.go | 268 +- .../internal/go_collector_options.go | 2 + .../client_golang/prometheus/metric.go | 2 +- .../prometheus/process_collector.go | 29 +- .../prometheus/process_collector_other.go | 14 + .../prometheus/promhttp/delegator.go | 6 + .../client_golang/prometheus/promhttp/http.go | 113 +- .../client_golang/prometheus/registry.go | 17 +- .../client_golang/prometheus/summary.go | 42 + .../client_golang/prometheus/vec.go | 2 +- .../prometheus/common/expfmt/decode.go | 14 +- .../prometheus/common/expfmt/encode.go | 24 +- .../prometheus/common/expfmt/expfmt.go | 76 +- .../common/expfmt/openmetrics_create.go | 2 +- .../prometheus/common/expfmt/text_create.go | 4 +- .../prometheus/common/expfmt/text_parse.go | 162 +- .../prometheus/common/model/labels.go | 27 +- .../common/model/labelset_string.go | 2 - .../common/model/labelset_string_go120.go | 39 - .../prometheus/common/model/metric.go | 31 +- .../github.com/stoewer/go-strcase/.gitignore | 17 + .../stoewer/go-strcase/.golangci.yml | 26 + vendor/github.com/stoewer/go-strcase/LICENSE | 21 + .../github.com/stoewer/go-strcase/README.md | 50 + vendor/github.com/stoewer/go-strcase/camel.go | 40 + vendor/github.com/stoewer/go-strcase/doc.go | 8 + .../github.com/stoewer/go-strcase/helper.go | 71 + vendor/github.com/stoewer/go-strcase/kebab.go | 14 + vendor/github.com/stoewer/go-strcase/snake.go | 58 + .../net/http/otelhttp/common.go | 14 - .../net/http/otelhttp/config.go | 15 +- .../net/http/otelhttp/handler.go | 103 +- .../otelhttp/internal/request/body_wrapper.go | 75 + .../internal/request/resp_writer_wrapper.go | 119 + .../net/http/otelhttp/internal/semconv/env.go | 159 +- .../semconv/{v1.24.0.go => httpconv.go} | 153 +- .../http/otelhttp/internal/semconv/util.go | 11 +- .../http/otelhttp/internal/semconv/v1.20.0.go | 200 + .../otelhttp/internal/semconvutil/netconv.go | 2 +- .../net/http/otelhttp/transport.go | 108 +- .../net/http/otelhttp/version.go | 2 +- .../instrumentation/net/http/otelhttp/wrap.go | 99 - vendor/go.opentelemetry.io/otel/.golangci.yml | 13 +- vendor/go.opentelemetry.io/otel/CHANGELOG.md | 121 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 6 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 18 +- vendor/go.opentelemetry.io/otel/Makefile | 22 +- vendor/go.opentelemetry.io/otel/README.md | 34 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 12 +- 
.../go.opentelemetry.io/otel/attribute/set.go | 40 +- .../otel/baggage/baggage.go | 150 +- .../go.opentelemetry.io/otel/codes/codes.go | 2 +- vendor/go.opentelemetry.io/otel/doc.go | 2 + .../otel/exporters/otlp/otlptrace/LICENSE | 201 + .../otel/exporters/otlp/otlptrace/README.md | 3 + .../otel/exporters/otlp/otlptrace/clients.go | 43 + .../otel/exporters/otlp/otlptrace/doc.go | 10 + .../otel/exporters/otlp/otlptrace/exporter.go | 105 + .../internal/tracetransform/attribute.go | 147 + .../tracetransform/instrumentation.go | 19 + .../internal/tracetransform/resource.go | 17 + .../otlptrace/internal/tracetransform/span.go | 219 + .../otlp/otlptrace/otlptracegrpc/LICENSE | 201 + .../otlp/otlptrace/otlptracegrpc/README.md | 3 + .../otlp/otlptrace/otlptracegrpc/client.go | 295 + .../otlp/otlptrace/otlptracegrpc/doc.go | 65 + .../otlp/otlptrace/otlptracegrpc/exporter.go | 20 + .../internal/envconfig/envconfig.go | 215 + .../otlptrace/otlptracegrpc/internal/gen.go | 24 + .../internal/otlpconfig/envconfig.go | 142 + .../internal/otlpconfig/options.go | 353 + .../internal/otlpconfig/optiontypes.go | 40 + .../otlptracegrpc/internal/otlpconfig/tls.go | 26 + .../otlptracegrpc/internal/partialsuccess.go | 56 + .../otlptracegrpc/internal/retry/retry.go | 145 + .../otlp/otlptrace/otlptracegrpc/options.go | 210 + .../otel/exporters/otlp/otlptrace/version.go | 9 + .../otel/internal/global/meter.go | 317 +- .../otel/internal/rawhelpers.go | 12 +- .../otel/metric/asyncfloat64.go | 2 +- .../otel/metric/asyncint64.go | 2 +- .../otel/metric/instrument.go | 2 +- .../go.opentelemetry.io/otel/metric/meter.go | 13 + .../otel/metric/noop/README.md | 3 + .../otel/metric/noop/noop.go | 281 + vendor/go.opentelemetry.io/otel/renovate.json | 8 + vendor/go.opentelemetry.io/otel/sdk/LICENSE | 201 + vendor/go.opentelemetry.io/otel/sdk/README.md | 3 + .../otel/sdk/instrumentation/README.md | 3 + .../otel/sdk/instrumentation/doc.go | 13 + .../otel/sdk/instrumentation/library.go | 9 + .../otel/sdk/instrumentation/scope.go | 15 + .../otel/sdk/internal/env/env.go | 166 + .../otel/sdk/internal/x/README.md | 46 + .../otel/sdk/internal/x/x.go | 66 + .../otel/sdk/resource/README.md | 3 + .../otel/sdk/resource/auto.go | 118 + .../otel/sdk/resource/builtin.go | 118 + .../otel/sdk/resource/config.go | 195 + .../otel/sdk/resource/container.go | 89 + .../otel/sdk/resource/doc.go | 20 + .../otel/sdk/resource/env.go | 95 + .../otel/sdk/resource/host_id.go | 109 + .../otel/sdk/resource/host_id_bsd.go | 12 + .../otel/sdk/resource/host_id_darwin.go | 8 + .../otel/sdk/resource/host_id_exec.go | 18 + .../otel/sdk/resource/host_id_linux.go | 11 + .../otel/sdk/resource/host_id_readfile.go | 17 + .../otel/sdk/resource/host_id_unsupported.go | 19 + .../otel/sdk/resource/host_id_windows.go | 36 + .../otel/sdk/resource/os.go | 89 + .../otel/sdk/resource/os_release_darwin.go | 91 + .../otel/sdk/resource/os_release_unix.go | 143 + .../otel/sdk/resource/os_unix.go | 79 + .../otel/sdk/resource/os_unsupported.go | 15 + .../otel/sdk/resource/os_windows.go | 89 + .../otel/sdk/resource/process.go | 173 + .../otel/sdk/resource/resource.go | 294 + .../otel/sdk/trace/README.md | 3 + .../otel/sdk/trace/batch_span_processor.go | 413 + .../go.opentelemetry.io/otel/sdk/trace/doc.go | 10 + .../otel/sdk/trace/event.go | 26 + .../otel/sdk/trace/evictedqueue.go | 64 + .../otel/sdk/trace/id_generator.go | 81 + .../otel/sdk/trace/link.go | 23 + .../otel/sdk/trace/provider.go | 493 + .../otel/sdk/trace/sampler_env.go | 97 + .../otel/sdk/trace/sampling.go | 282 + 
.../otel/sdk/trace/simple_span_processor.go | 121 + .../otel/sdk/trace/snapshot.go | 133 + .../otel/sdk/trace/span.go | 895 ++ .../otel/sdk/trace/span_exporter.go | 36 + .../otel/sdk/trace/span_limits.go | 114 + .../otel/sdk/trace/span_processor.go | 61 + .../otel/sdk/trace/tracer.go | 153 + .../otel/sdk/trace/version.go | 9 + .../go.opentelemetry.io/otel/sdk/version.go | 9 + .../otel/semconv/v1.17.0/README.md | 3 + .../otel/semconv/v1.17.0/doc.go | 9 + .../semconv/{v1.24.0 => v1.17.0}/event.go | 148 +- .../otel/semconv/v1.17.0/exception.go | 9 + .../otel/semconv/v1.17.0/http.go | 10 + .../semconv/{v1.24.0 => v1.17.0}/resource.go | 2922 +++--- .../otel/semconv/v1.17.0/schema.go | 9 + .../otel/semconv/v1.17.0/trace.go | 3364 ++++++ .../otel/semconv/v1.24.0/README.md | 3 - .../otel/semconv/v1.24.0/attribute_group.go | 4387 -------- .../otel/semconv/v1.24.0/trace.go | 1323 --- .../otel/semconv/v1.26.0/README.md | 3 + .../otel/semconv/v1.26.0/attribute_group.go | 8996 +++++++++++++++++ .../otel/semconv/{v1.24.0 => v1.26.0}/doc.go | 4 +- .../semconv/{v1.24.0 => v1.26.0}/exception.go | 2 +- .../semconv/{v1.24.0 => v1.26.0}/metric.go | 466 +- .../semconv/{v1.24.0 => v1.26.0}/schema.go | 4 +- .../go.opentelemetry.io/otel/trace/context.go | 2 +- vendor/go.opentelemetry.io/otel/trace/doc.go | 2 +- .../otel/trace/noop/README.md | 3 + .../otel/trace/noop/noop.go | 112 + .../otel/trace/provider.go | 59 + vendor/go.opentelemetry.io/otel/trace/span.go | 177 + .../go.opentelemetry.io/otel/trace/trace.go | 249 - .../go.opentelemetry.io/otel/trace/tracer.go | 37 + .../otel/trace/tracestate.go | 10 + .../otel/verify_examples.sh | 74 - .../otel/verify_released_changelog.sh | 42 + vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 10 +- vendor/go.opentelemetry.io/proto/otlp/LICENSE | 201 + .../collector/trace/v1/trace_service.pb.go | 367 + .../collector/trace/v1/trace_service.pb.gw.go | 171 + .../trace/v1/trace_service_grpc.pb.go | 109 + .../proto/otlp/common/v1/common.pb.go | 630 ++ .../proto/otlp/resource/v1/resource.pb.go | 193 + .../proto/otlp/trace/v1/trace.pb.go | 1281 +++ .../golang.org/x/crypto/argon2/blamka_amd64.s | 2972 +++++- .../x/crypto/blake2b/blake2bAVX2_amd64.s | 5167 ++++++++-- .../x/crypto/blake2b/blake2b_amd64.s | 1681 ++- .../x/crypto/internal/poly1305/sum_amd64.s | 133 +- vendor/golang.org/x/crypto/sha3/shake.go | 4 +- .../golang.org/x/crypto/ssh/agent/keyring.go | 9 + vendor/golang.org/x/crypto/ssh/server.go | 4 +- .../x/exp/constraints/constraints.go | 50 + vendor/golang.org/x/exp/slices/cmp.go | 44 + vendor/golang.org/x/exp/slices/slices.go | 515 + vendor/golang.org/x/exp/slices/sort.go | 197 + .../golang.org/x/exp/slices/zsortanyfunc.go | 479 + .../golang.org/x/exp/slices/zsortordered.go | 481 + vendor/golang.org/x/net/http2/config.go | 122 + vendor/golang.org/x/net/http2/config_go124.go | 61 + .../x/net/http2/config_pre_go124.go | 16 + vendor/golang.org/x/net/http2/http2.go | 53 +- vendor/golang.org/x/net/http2/server.go | 181 +- vendor/golang.org/x/net/http2/transport.go | 143 +- vendor/golang.org/x/net/http2/write.go | 10 + .../x/net/internal/timeseries/timeseries.go | 525 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + vendor/golang.org/x/net/trace/trace.go | 1130 +++ .../golang.org/x/net/websocket/websocket.go | 2 +- vendor/golang.org/x/oauth2/LICENSE | 4 +- vendor/golang.org/x/oauth2/token.go | 7 + .../x/sync/singleflight/singleflight.go | 214 + vendor/golang.org/x/sys/cpu/cpu.go 
| 19 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 2 +- .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 137 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 +- vendor/golang.org/x/sys/unix/README.md | 2 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 5 +- vendor/golang.org/x/sys/unix/syscall_aix.go | 2 +- .../golang.org/x/sys/unix/syscall_darwin.go | 37 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 + vendor/golang.org/x/sys/unix/syscall_linux.go | 63 +- .../x/sys/unix/syscall_linux_arm64.go | 2 + .../x/sys/unix/syscall_linux_loong64.go | 2 + .../x/sys/unix/syscall_linux_riscv64.go | 2 + .../golang.org/x/sys/unix/vgetrandom_linux.go | 13 + .../unix/vgetrandom_unsupported.go} | 11 +- .../x/sys/unix/zerrors_darwin_amd64.go | 7 + .../x/sys/unix/zerrors_darwin_arm64.go | 7 + vendor/golang.org/x/sys/unix/zerrors_linux.go | 13 +- .../x/sys/unix/zerrors_linux_386.go | 5 + .../x/sys/unix/zerrors_linux_amd64.go | 5 + .../x/sys/unix/zerrors_linux_arm.go | 5 + .../x/sys/unix/zerrors_linux_arm64.go | 5 + .../x/sys/unix/zerrors_linux_loong64.go | 5 + .../x/sys/unix/zerrors_linux_mips.go | 5 + .../x/sys/unix/zerrors_linux_mips64.go | 5 + .../x/sys/unix/zerrors_linux_mips64le.go | 5 + .../x/sys/unix/zerrors_linux_mipsle.go | 5 + .../x/sys/unix/zerrors_linux_ppc.go | 5 + .../x/sys/unix/zerrors_linux_ppc64.go | 5 + .../x/sys/unix/zerrors_linux_ppc64le.go | 5 + .../x/sys/unix/zerrors_linux_riscv64.go | 5 + .../x/sys/unix/zerrors_linux_s390x.go | 5 + .../x/sys/unix/zerrors_linux_sparc64.go | 5 + .../x/sys/unix/zerrors_zos_s390x.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 20 + .../x/sys/unix/zsyscall_darwin_amd64.s | 5 + .../x/sys/unix/zsyscall_darwin_arm64.go | 20 + .../x/sys/unix/zsyscall_darwin_arm64.s | 5 + .../golang.org/x/sys/unix/zsyscall_linux.go | 17 - .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 2 +- .../x/sys/unix/zsysnum_linux_loong64.go | 2 + .../x/sys/unix/zsysnum_linux_riscv64.go | 2 +- .../x/sys/unix/ztypes_darwin_amd64.go | 13 + .../x/sys/unix/ztypes_darwin_arm64.go | 13 + .../x/sys/unix/ztypes_freebsd_386.go | 1 + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 90 +- .../x/sys/unix/ztypes_linux_riscv64.go | 33 + .../golang.org/x/sys/windows/dll_windows.go | 2 +- .../golang.org/x/sys/windows/registry/key.go | 205 + .../x/sys/windows/registry/mksyscall.go | 9 + .../x/sys/windows/registry/syscall.go | 32 + .../x/sys/windows/registry/value.go | 386 + .../sys/windows/registry/zsyscall_windows.go | 117 + .../x/sys/windows/syscall_windows.go | 4 + .../golang.org/x/sys/windows/types_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 38 + vendor/golang.org/x/term/term_windows.go | 1 + .../x/text/feature/plural/common.go | 70 + .../x/text/feature/plural/message.go | 244 + .../x/text/feature/plural/plural.go | 262 + .../x/text/feature/plural/tables.go | 552 + .../x/text/internal/catmsg/catmsg.go | 417 + .../x/text/internal/catmsg/codec.go | 407 + .../x/text/internal/catmsg/varint.go | 62 + .../x/text/internal/format/format.go | 41 + .../x/text/internal/format/parser.go | 358 + vendor/golang.org/x/text/internal/internal.go | 49 + vendor/golang.org/x/text/internal/match.go | 67 + .../x/text/internal/number/common.go | 55 + .../x/text/internal/number/decimal.go | 500 + .../x/text/internal/number/format.go | 535 + .../x/text/internal/number/number.go | 152 + 
.../x/text/internal/number/pattern.go | 485 + .../internal/number/roundingmode_string.go | 30 + .../x/text/internal/number/tables.go | 1219 +++ .../x/text/internal/stringset/set.go | 86 + vendor/golang.org/x/text/message/catalog.go | 36 + .../x/text/message/catalog/catalog.go | 365 + .../golang.org/x/text/message/catalog/dict.go | 129 + .../golang.org/x/text/message/catalog/go19.go | 15 + .../x/text/message/catalog/gopre19.go | 23 + vendor/golang.org/x/text/message/doc.go | 99 + vendor/golang.org/x/text/message/format.go | 510 + vendor/golang.org/x/text/message/message.go | 192 + vendor/golang.org/x/text/message/print.go | 984 ++ vendor/golang.org/x/time/LICENSE | 4 +- vendor/golang.org/x/time/rate/rate.go | 17 +- .../x/tools/cmd/stringer/stringer.go | 203 +- .../x/tools/go/ast/inspector/inspector.go | 9 + .../x/tools/go/ast/inspector/iter.go | 85 + vendor/golang.org/x/tools/go/packages/doc.go | 15 +- .../x/tools/go/packages/external.go | 2 +- .../x/tools/go/packages/loadmode_string.go | 69 +- .../x/tools/go/packages/packages.go | 28 +- .../x/tools/go/types/objectpath/objectpath.go | 14 +- .../x/tools/go/types/typeutil/callee.go | 68 + .../x/tools/go/types/typeutil/imports.go | 30 + .../x/tools/go/types/typeutil/map.go | 517 + .../tools/go/types/typeutil/methodsetcache.go | 71 + .../x/tools/go/types/typeutil/ui.go | 53 + .../x/tools/internal/aliases/aliases.go | 10 +- .../x/tools/internal/aliases/aliases_go121.go | 35 - .../x/tools/internal/aliases/aliases_go122.go | 33 +- .../x/tools/internal/gcimporter/bimport.go | 61 - .../x/tools/internal/gcimporter/gcimporter.go | 11 +- .../x/tools/internal/gcimporter/iexport.go | 260 +- .../x/tools/internal/gcimporter/iimport.go | 31 +- .../internal/gcimporter/newInterface10.go | 22 - .../internal/gcimporter/newInterface11.go | 14 - .../tools/internal/gcimporter/predeclared.go | 91 + .../internal/gcimporter/support_go118.go | 34 - .../x/tools/internal/gcimporter/unified_no.go | 10 - .../tools/internal/gcimporter/unified_yes.go | 10 - .../tools/internal/gcimporter/ureader_yes.go | 44 +- .../x/tools/internal/gocommand/invoke.go | 18 +- .../x/tools/internal/pkgbits/decoder.go | 34 +- .../x/tools/internal/pkgbits/encoder.go | 43 +- .../x/tools/internal/pkgbits/frames_go1.go | 21 - .../x/tools/internal/pkgbits/frames_go17.go | 28 - .../x/tools/internal/pkgbits/support.go | 2 +- .../x/tools/internal/pkgbits/sync.go | 23 + .../internal/pkgbits/syncmarker_string.go | 7 +- .../x/tools/internal/pkgbits/version.go | 85 + .../x/tools/internal/stdlib/manifest.go | 2 +- .../internal/tokeninternal/tokeninternal.go | 137 - .../x/tools/internal/typeparams/common.go | 140 + .../x/tools/internal/typeparams/coretype.go | 150 + .../x/tools/internal/typeparams/free.go | 118 + .../x/tools/internal/typeparams/normalize.go | 218 + .../x/tools/internal/typeparams/termlist.go | 163 + .../x/tools/internal/typeparams/typeterm.go | 169 + .../x/tools/internal/typesinternal/element.go | 133 + .../tools/internal/typesinternal/errorcode.go | 8 +- .../x/tools/internal/typesinternal/recv.go | 8 +- .../x/tools/internal/versions/toolchain.go | 14 - .../internal/versions/toolchain_go119.go | 14 - .../internal/versions/toolchain_go120.go | 14 - .../x/tools/internal/versions/types.go | 33 +- .../x/tools/internal/versions/types_go121.go | 30 - .../x/tools/internal/versions/types_go122.go | 41 - .../genproto/googleapis/api/LICENSE | 202 + .../api/expr/v1alpha1/checked.pb.go | 1664 +++ .../googleapis/api/expr/v1alpha1/eval.pb.go | 580 ++ .../api/expr/v1alpha1/explain.pb.go | 275 + 
.../googleapis/api/expr/v1alpha1/syntax.pb.go | 2040 ++++ .../googleapis/api/expr/v1alpha1/value.pb.go | 721 ++ .../googleapis/api/httpbody/httpbody.pb.go | 235 + .../rpc/errdetails/error_details.pb.go | 1314 +++ .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 73 + vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/MAINTAINERS.md | 36 + vendor/google.golang.org/grpc/Makefile | 49 + vendor/google.golang.org/grpc/README.md | 107 + vendor/google.golang.org/grpc/SECURITY.md | 3 + .../grpc/attributes/attributes.go | 141 + vendor/google.golang.org/grpc/backoff.go | 61 + .../google.golang.org/grpc/backoff/backoff.go | 52 + .../grpc/balancer/balancer.go | 464 + .../grpc/balancer/base/balancer.go | 264 + .../grpc/balancer/base/base.go | 71 + .../grpc/balancer/conn_state_evaluator.go | 74 + .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/pickfirst/pickfirst.go | 283 + .../grpc/balancer/roundrobin/roundrobin.go | 81 + .../grpc/balancer_wrapper.go | 365 + .../grpc_binarylog_v1/binarylog.pb.go | 1183 +++ vendor/google.golang.org/grpc/call.go | 74 + .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 1827 ++++ vendor/google.golang.org/grpc/codec.go | 105 + .../grpc/credentials/credentials.go | 291 + .../grpc/credentials/insecure/insecure.go | 98 + .../google.golang.org/grpc/credentials/tls.go | 283 + vendor/google.golang.org/grpc/dialoptions.go | 767 ++ vendor/google.golang.org/grpc/doc.go | 26 + .../grpc/encoding/encoding.go | 131 + .../grpc/encoding/encoding_v2.go | 81 + .../grpc/encoding/gzip/gzip.go | 132 + .../grpc/encoding/proto/proto.go | 96 + .../grpc/experimental/stats/metricregistry.go | 269 + .../grpc/experimental/stats/metrics.go | 114 + .../grpc/grpclog/component.go | 10 +- .../google.golang.org/grpc/grpclog/grpclog.go | 104 +- .../grpc/grpclog/internal/grpclog.go | 26 + .../grpc/grpclog/internal/logger.go | 87 + .../internal/loggerv2.go} | 178 +- .../google.golang.org/grpc/grpclog/logger.go | 59 +- .../grpc/grpclog/loggerv2.go | 181 +- .../grpc/health/grpc_health_v1/health.pb.go | 308 + .../health/grpc_health_v1/health_grpc.pb.go | 234 + vendor/google.golang.org/grpc/interceptor.go | 104 + .../grpc/internal/backoff/backoff.go | 109 + .../balancer/gracefulswitch/config.go | 82 + .../balancer/gracefulswitch/gracefulswitch.go | 419 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 192 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 208 + .../grpc/internal/binarylog/method_logger.go | 446 + .../grpc/internal/binarylog/sink.go | 170 + .../grpc/internal/buffer/unbounded.go | 116 + .../grpc/internal/channelz/channel.go | 255 + .../grpc/internal/channelz/channelmap.go | 395 + .../grpc/internal/channelz/funcs.go | 230 + .../grpc/internal/channelz/logging.go | 75 + .../grpc/internal/channelz/server.go | 119 + .../grpc/internal/channelz/socket.go | 130 + .../grpc/internal/channelz/subchannel.go | 151 + .../grpc/internal/channelz/syscall_linux.go | 65 + .../internal/channelz/syscall_nonlinux.go | 47 + .../grpc/internal/channelz/trace.go | 204 + .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 75 + .../grpc/internal/credentials/syscallconn.go | 58 + .../grpc/internal/credentials/util.go | 52 + .../grpc/internal/envconfig/envconfig.go | 76 + .../grpc/internal/envconfig/observability.go | 42 + .../grpc/internal/envconfig/xds.go | 56 + 
.../grpc/internal/experimental.go | 8 +- .../{prefixLogger.go => prefix_logger.go} | 40 +- .../internal/grpcsync/callback_serializer.go | 112 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcsync/pubsub.go | 121 + .../grpc/internal/grpcutil/compressor.go | 42 + .../grpc/internal/grpcutil/encode_duration.go | 63 + .../grpc/internal/grpcutil/grpcutil.go | 20 + .../grpc/internal/grpcutil/metadata.go | 40 + .../grpc/internal/grpcutil/method.go | 88 + .../grpc/internal/grpcutil/regex.go | 31 + .../grpc/internal/idle/idle.go | 278 + .../grpc/internal/internal.go | 24 +- .../grpc/internal/metadata/metadata.go | 132 + .../grpc/internal/pretty/pretty.go | 73 + .../grpc/internal/resolver/config_selector.go | 167 + .../internal/resolver/dns/dns_resolver.go | 458 + .../resolver/dns/internal/internal.go | 77 + .../resolver/passthrough/passthrough.go | 64 + .../grpc/internal/resolver/unix/unix.go | 78 + .../grpc/internal/serviceconfig/duration.go | 130 + .../internal/serviceconfig/serviceconfig.go | 180 + .../grpc/internal/stats/labels.go | 42 + .../internal/stats/metrics_recorder_list.go | 95 + .../grpc/internal/status/status.go | 4 +- .../grpc/internal/syscall/syscall_linux.go | 112 + .../grpc/internal/syscall/syscall_nonlinux.go | 77 + .../grpc/internal/tcp_keepalive_unix.go | 2 +- .../grpc/internal/tcp_keepalive_windows.go | 2 +- .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 1035 ++ .../grpc/internal/transport/defaults.go | 55 + .../grpc/internal/transport/flowcontrol.go | 215 + .../grpc/internal/transport/handler_server.go | 502 + .../grpc/internal/transport/http2_client.go | 1823 ++++ .../grpc/internal/transport/http2_server.go | 1475 +++ .../grpc/internal/transport/http_util.go | 468 + .../grpc/internal/transport/logging.go | 40 + .../transport/networktype/networktype.go | 46 + .../grpc/internal/transport/proxy.go | 150 + .../grpc/internal/transport/transport.go | 934 ++ .../grpc/keepalive/keepalive.go | 99 + .../google.golang.org/grpc/mem/buffer_pool.go | 194 + .../grpc/mem/buffer_slice.go | 226 + vendor/google.golang.org/grpc/mem/buffers.go | 252 + .../grpc/metadata/metadata.go | 295 + vendor/google.golang.org/grpc/peer/peer.go | 83 + .../google.golang.org/grpc/picker_wrapper.go | 219 + vendor/google.golang.org/grpc/preloader.go | 85 + .../grpc/resolver/dns/dns_resolver.go | 60 + vendor/google.golang.org/grpc/resolver/map.go | 251 + .../grpc/resolver/resolver.go | 332 + .../grpc/resolver_wrapper.go | 201 + vendor/google.golang.org/grpc/rpc_util.go | 1069 ++ vendor/google.golang.org/grpc/server.go | 2208 ++++ .../google.golang.org/grpc/service_config.go | 356 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 343 + vendor/google.golang.org/grpc/stream.go | 1841 ++++ .../grpc/stream_interfaces.go | 238 + vendor/google.golang.org/grpc/tap/tap.go | 62 + vendor/google.golang.org/grpc/trace.go | 143 + .../google.golang.org/grpc/trace_notrace.go | 52 + .../google.golang.org/grpc/trace_withtrace.go | 39 + vendor/google.golang.org/grpc/version.go | 22 + .../protobuf/encoding/protojson/decode.go | 685 ++ .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 380 + .../encoding/protojson/well_known_types.go | 876 ++ .../protobuf/internal/descopts/options.go | 20 +- .../internal/editionssupport/editions.go | 2 +- .../protobuf/internal/encoding/json/decode.go | 340 + .../internal/encoding/json/decode_number.go 
| 254 + .../internal/encoding/json/decode_string.go | 91 + .../internal/encoding/json/decode_token.go | 192 + .../protobuf/internal/encoding/json/encode.go | 278 + .../protobuf/internal/filedesc/desc.go | 4 + .../protobuf/internal/filedesc/desc_init.go | 2 + .../protobuf/internal/filedesc/desc_lazy.go | 2 + .../protobuf/internal/filedesc/editions.go | 2 +- .../protobuf/internal/genid/doc.go | 2 +- .../internal/genid/go_features_gen.go | 15 +- .../protobuf/internal/genid/map_entry.go | 2 +- .../protobuf/internal/genid/wrappers.go | 2 +- .../protobuf/internal/impl/codec_extension.go | 11 +- .../protobuf/internal/impl/codec_field.go | 3 + .../protobuf/internal/impl/codec_message.go | 3 + .../protobuf/internal/impl/codec_reflect.go | 210 - .../protobuf/internal/impl/codec_unsafe.go | 3 - .../protobuf/internal/impl/convert.go | 2 +- .../protobuf/internal/impl/encode.go | 2 +- .../protobuf/internal/impl/equal.go | 224 + .../internal/impl/legacy_extension.go | 1 + .../protobuf/internal/impl/message.go | 4 +- .../protobuf/internal/impl/pointer_reflect.go | 215 - .../protobuf/internal/impl/pointer_unsafe.go | 3 - .../protobuf/internal/strs/strings_pure.go | 28 - .../internal/strs/strings_unsafe_go120.go | 3 +- .../internal/strs/strings_unsafe_go121.go | 3 +- .../protobuf/internal/version/version.go | 4 +- .../google.golang.org/protobuf/proto/equal.go | 9 + .../protobuf/proto/extension.go | 71 + .../protobuf/reflect/protodesc/desc_init.go | 4 + .../protobuf/reflect/protodesc/editions.go | 2 +- .../protobuf/reflect/protoreflect/methods.go | 10 + .../reflect/protoreflect/value_pure.go | 60 - .../protoreflect/value_unsafe_go120.go | 3 +- .../protoreflect/value_unsafe_go121.go | 3 +- .../protobuf/runtime/protoiface/methods.go | 18 + .../types/descriptorpb/descriptor.pb.go | 748 +- .../protobuf/types/dynamicpb/dynamic.go | 718 ++ .../protobuf/types/dynamicpb/types.go | 184 + .../types/gofeaturespb/go_features.pb.go | 24 +- .../protobuf/types/known/anypb/any.pb.go | 24 +- .../types/known/durationpb/duration.pb.go | 24 +- .../protobuf/types/known/emptypb/empty.pb.go | 150 + .../types/known/fieldmaskpb/field_mask.pb.go | 572 ++ .../types/known/structpb/struct.pb.go | 782 ++ .../types/known/timestamppb/timestamp.pb.go | 24 +- .../types/known/wrapperspb/wrappers.pb.go | 632 ++ .../apiserver/pkg/apis/apiserver/doc.go | 21 + .../pkg/apis/apiserver/install/install.go | 43 + .../apiserver/pkg/apis/apiserver/register.go | 53 + .../apiserver/pkg/apis/apiserver/types.go | 406 + .../pkg/apis/apiserver/types_encryption.go | 149 + .../pkg/apis/apiserver/v1/defaults.go | 50 + .../apiserver/pkg/apis/apiserver/v1/doc.go | 23 + .../pkg/apis/apiserver/v1/register.go | 56 + .../apiserver/pkg/apis/apiserver/v1/types.go | 50 + .../pkg/apis/apiserver/v1/types_encryption.go | 149 + .../apiserver/v1/zz_generated.conversion.go | 363 + .../apiserver/v1/zz_generated.deepcopy.go | 281 + .../apiserver/v1/zz_generated.defaults.go | 46 + .../pkg/apis/apiserver/v1alpha1/conversion.go | 32 + .../pkg/apis/apiserver/v1alpha1/defaults.go | 36 + .../pkg/apis/apiserver/v1alpha1/doc.go | 24 + .../pkg/apis/apiserver/v1alpha1/register.go | 63 + .../pkg/apis/apiserver/v1alpha1/types.go | 620 ++ .../v1alpha1/zz_generated.conversion.go | 964 ++ .../v1alpha1/zz_generated.deepcopy.go | 606 ++ .../v1alpha1/zz_generated.defaults.go | 43 + .../pkg/apis/apiserver/v1beta1/conversion.go | 32 + .../pkg/apis/apiserver/v1beta1/defaults.go | 36 + .../pkg/apis/apiserver/v1beta1/doc.go | 23 + .../pkg/apis/apiserver/v1beta1/register.go | 61 + 
.../pkg/apis/apiserver/v1beta1/types.go | 591 ++ .../v1beta1/zz_generated.conversion.go | 900 ++ .../v1beta1/zz_generated.deepcopy.go | 553 + .../v1beta1/zz_generated.defaults.go | 43 + .../apis/apiserver/validation/validation.go | 795 ++ .../validation/validation_encryption.go | 451 + .../apis/apiserver/zz_generated.deepcopy.go | 803 ++ vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS | 8 + vendor/k8s.io/apiserver/pkg/apis/audit/doc.go | 20 + .../apiserver/pkg/apis/audit/helpers.go | 38 + .../apiserver/pkg/apis/audit/register.go | 53 + .../k8s.io/apiserver/pkg/apis/audit/types.go | 312 + .../k8s.io/apiserver/pkg/apis/audit/v1/doc.go | 25 + .../pkg/apis/audit/v1/generated.pb.go | 3230 ++++++ .../pkg/apis/audit/v1/generated.proto | 287 + .../apiserver/pkg/apis/audit/v1/register.go | 58 + .../apiserver/pkg/apis/audit/v1/types.go | 318 + .../apis/audit/v1/zz_generated.conversion.go | 327 + .../apis/audit/v1/zz_generated.deepcopy.go | 297 + .../apis/audit/v1/zz_generated.defaults.go | 33 + .../pkg/apis/audit/zz_generated.deepcopy.go | 297 + .../k8s.io/apiserver/pkg/apis/cel/config.go | 45 + vendor/k8s.io/apiserver/pkg/audit/OWNERS | 8 + vendor/k8s.io/apiserver/pkg/audit/context.go | 188 + .../k8s.io/apiserver/pkg/audit/evaluator.go | 45 + vendor/k8s.io/apiserver/pkg/audit/format.go | 73 + vendor/k8s.io/apiserver/pkg/audit/metrics.go | 111 + vendor/k8s.io/apiserver/pkg/audit/request.go | 312 + vendor/k8s.io/apiserver/pkg/audit/scheme.go | 38 + vendor/k8s.io/apiserver/pkg/audit/types.go | 46 + vendor/k8s.io/apiserver/pkg/audit/union.go | 71 + .../authenticator/audagnostic.go | 90 + .../authentication/authenticator/audiences.go | 63 + .../authenticator/interfaces.go | 65 + .../authenticatorfactory/delegating.go | 127 + .../authenticatorfactory/loopback.go | 29 + .../authenticatorfactory/metrics.go | 69 + .../authenticatorfactory/requestheader.go | 37 + .../pkg/authentication/cel/compile.go | 155 + .../pkg/authentication/cel/interface.go | 148 + .../pkg/authentication/cel/mapper.go | 97 + .../group/authenticated_group_adder.go | 66 + .../pkg/authentication/group/group_adder.go | 57 + .../authentication/group/token_group_adder.go | 57 + .../request/anonymous/anonymous.go | 62 + .../request/bearertoken/bearertoken.go | 76 + .../request/headerrequest/requestheader.go | 204 + .../headerrequest/requestheader_controller.go | 340 + .../pkg/authentication/request/union/union.go | 71 + .../request/websocket/protocol.go | 108 + .../pkg/authentication/request/x509/OWNERS | 8 + .../pkg/authentication/request/x509/doc.go | 19 + .../request/x509/verify_options.go | 71 + .../pkg/authentication/request/x509/x509.go | 285 + .../pkg/authentication/serviceaccount/util.go | 228 + .../token/cache/cache_simple.go | 49 + .../token/cache/cache_striped.go | 60 + .../token/cache/cached_token_authenticator.go | 318 + .../pkg/authentication/token/cache/stats.go | 126 + .../token/tokenfile/tokenfile.go | 99 + .../apiserver/pkg/authentication/user/doc.go | 19 + .../apiserver/pkg/authentication/user/user.go | 84 + .../authorization/authorizer/interfaces.go | 184 + .../pkg/authorization/authorizer/rule.go | 73 + .../authorizerfactory/builtin.go | 95 + .../authorizerfactory/delegating.go | 60 + .../authorizerfactory/metrics.go | 82 + .../pkg/authorization/cel/compile.go | 330 + .../pkg/authorization/cel/interface.go | 41 + .../pkg/authorization/cel/matcher.go | 91 + .../pkg/authorization/cel/metrics.go | 120 + vendor/k8s.io/apiserver/pkg/cel/OWNERS | 11 + vendor/k8s.io/apiserver/pkg/cel/cidr.go | 87 + 
.../apiserver/pkg/cel/environment/base.go | 241 + .../pkg/cel/environment/environment.go | 298 + vendor/k8s.io/apiserver/pkg/cel/errors.go | 124 + vendor/k8s.io/apiserver/pkg/cel/escaping.go | 170 + vendor/k8s.io/apiserver/pkg/cel/format.go | 73 + vendor/k8s.io/apiserver/pkg/cel/ip.go | 86 + .../k8s.io/apiserver/pkg/cel/library/authz.go | 769 ++ .../k8s.io/apiserver/pkg/cel/library/cidr.go | 287 + .../k8s.io/apiserver/pkg/cel/library/cost.go | 581 ++ .../apiserver/pkg/cel/library/format.go | 270 + vendor/k8s.io/apiserver/pkg/cel/library/ip.go | 329 + .../k8s.io/apiserver/pkg/cel/library/lists.go | 316 + .../apiserver/pkg/cel/library/quantity.go | 380 + .../k8s.io/apiserver/pkg/cel/library/regex.go | 193 + .../k8s.io/apiserver/pkg/cel/library/test.go | 83 + .../k8s.io/apiserver/pkg/cel/library/urls.go | 240 + vendor/k8s.io/apiserver/pkg/cel/limits.go | 52 + vendor/k8s.io/apiserver/pkg/cel/quantity.go | 76 + vendor/k8s.io/apiserver/pkg/cel/types.go | 598 ++ vendor/k8s.io/apiserver/pkg/cel/url.go | 80 + vendor/k8s.io/apiserver/pkg/cel/value.go | 769 ++ .../apiserver/pkg/endpoints/request/OWNERS | 4 + .../pkg/endpoints/request/context.go | 78 + .../apiserver/pkg/endpoints/request/doc.go | 20 + .../pkg/endpoints/request/received_time.go | 45 + .../pkg/endpoints/request/requestinfo.go | 305 + .../request/server_shutdown_signal.go | 55 + .../pkg/endpoints/request/webhook_duration.go | 311 + vendor/k8s.io/apiserver/pkg/features/OWNERS | 4 + .../apiserver/pkg/features/kube_features.go | 428 + .../server/dynamiccertificates/cert_key.go | 59 + .../server/dynamiccertificates/client_ca.go | 69 + .../configmap_cafile_content.go | 277 + .../dynamic_cafile_content.go | 293 + .../dynamic_serving_content.go | 236 + .../dynamic_sni_content.go | 49 + .../server/dynamiccertificates/interfaces.go | 68 + .../dynamiccertificates/named_certificates.go | 91 + .../dynamiccertificates/static_content.go | 120 + .../server/dynamiccertificates/tlsconfig.go | 287 + .../dynamiccertificates/union_content.go | 105 + .../pkg/server/dynamiccertificates/util.go | 66 + .../pkg/server/egressselector/config.go | 254 + .../server/egressselector/egress_selector.go | 406 + .../server/egressselector/metrics/metrics.go | 133 + .../pkg/util/feature/feature_gate.go | 33 + .../apiserver/pkg/util/version/registry.go | 454 + .../apiserver/pkg/util/version/version.go | 181 + .../pkg/util/webhook/authentication.go | 276 + .../apiserver/pkg/util/webhook/client.go | 257 + .../apiserver/pkg/util/webhook/error.go | 48 + .../apiserver/pkg/util/webhook/gencerts.sh | 148 + .../apiserver/pkg/util/webhook/metrics.go | 52 + .../pkg/util/webhook/serviceresolver.go | 48 + .../apiserver/pkg/util/webhook/validation.go | 115 + .../apiserver/pkg/util/webhook/webhook.go | 170 + .../x509metrics/server_cert_deprecations.go | 225 + .../k8s.io/apiserver/pkg/warning/context.go | 60 + .../authenticator/token/webhook/metrics.go | 35 + .../authenticator/token/webhook/webhook.go | 327 + .../plugin/pkg/authorizer/webhook/gencerts.sh | 102 + .../pkg/authorizer/webhook/metrics/metrics.go | 166 + .../plugin/pkg/authorizer/webhook/webhook.go | 605 ++ vendor/k8s.io/client-go/tools/events/OWNERS | 10 + vendor/k8s.io/client-go/tools/events/doc.go | 19 + .../tools/events/event_broadcaster.go | 457 + .../client-go/tools/events/event_recorder.go | 113 + vendor/k8s.io/client-go/tools/events/fake.go | 52 + .../k8s.io/client-go/tools/events/helper.go | 64 + .../client-go/tools/events/interfaces.go | 92 + .../cli/flag/ciphersuites_flag.go | 147 + 
.../colon_separated_multimap_string_string.go | 114 + .../cli/flag/configuration_map.go | 53 + .../k8s.io/component-base/cli/flag/flags.go | 66 + .../langle_separated_map_string_string.go | 82 + .../cli/flag/map_string_bool.go | 90 + .../cli/flag/map_string_string.go | 112 + .../cli/flag/namedcertkey_flag.go | 113 + vendor/k8s.io/component-base/cli/flag/noop.go | 41 + .../component-base/cli/flag/omitempty.go | 24 + .../component-base/cli/flag/sectioned.go | 105 + .../component-base/cli/flag/string_flag.go | 56 + .../cli/flag/string_slice_flag.go | 62 + .../component-base/cli/flag/tristate.go | 83 + .../k8s.io/component-base/featuregate/OWNERS | 16 + .../featuregate/feature_gate.go | 738 ++ vendor/k8s.io/component-base/metrics/OWNERS | 11 + .../k8s.io/component-base/metrics/buckets.go | 53 + .../component-base/metrics/collector.go | 190 + .../k8s.io/component-base/metrics/counter.go | 242 + vendor/k8s.io/component-base/metrics/desc.go | 225 + vendor/k8s.io/component-base/metrics/gauge.go | 277 + .../component-base/metrics/histogram.go | 214 + vendor/k8s.io/component-base/metrics/http.go | 87 + .../k8s.io/component-base/metrics/labels.go | 22 + .../metrics/legacyregistry/registry.go | 92 + .../k8s.io/component-base/metrics/metric.go | 235 + .../k8s.io/component-base/metrics/options.go | 136 + vendor/k8s.io/component-base/metrics/opts.go | 380 + .../metrics/processstarttime.go | 51 + .../metrics/processstarttime_others.go | 39 + .../metrics/processstarttime_windows.go | 34 + .../metrics/prometheus/feature/metrics.go | 53 + .../prometheusextension/timing_histogram.go | 189 + .../timing_histogram_vec.go | 111 + .../prometheusextension/weighted_histogram.go | 203 + .../weighted_histogram_vec.go | 106 + .../k8s.io/component-base/metrics/registry.go | 394 + .../k8s.io/component-base/metrics/summary.go | 226 + .../metrics/timing_histogram.go | 270 + vendor/k8s.io/component-base/metrics/value.go | 70 + .../k8s.io/component-base/metrics/version.go | 37 + .../component-base/metrics/version_parser.go | 50 + .../k8s.io/component-base/metrics/wrappers.go | 167 + vendor/k8s.io/component-base/tracing/OWNERS | 8 + .../component-base/tracing/api/v1/config.go | 88 + .../component-base/tracing/api/v1/doc.go | 29 + .../component-base/tracing/api/v1/types.go | 32 + .../tracing/api/v1/zz_generated.deepcopy.go | 48 + .../k8s.io/component-base/tracing/tracing.go | 98 + vendor/k8s.io/component-base/tracing/utils.go | 134 + .../kube-openapi/pkg/util/proto/document.go | 2 +- .../pkg/validation/errors/.gitignore | 2 + .../pkg/validation/errors/LICENSE | 202 + .../kube-openapi/pkg/validation/errors/api.go | 46 + .../kube-openapi/pkg/validation/errors/doc.go | 26 + .../pkg/validation/errors/headers.go | 44 + .../pkg/validation/errors/schema.go | 573 ++ .../pkg/validation/strfmt/.gitignore | 2 + .../pkg/validation/strfmt/LICENSE | 202 + .../pkg/validation/strfmt/bson.go | 103 + .../pkg/validation/strfmt/bson/objectid.go | 122 + .../pkg/validation/strfmt/date.go | 103 + .../pkg/validation/strfmt/default.go | 1562 +++ .../kube-openapi/pkg/validation/strfmt/doc.go | 18 + .../pkg/validation/strfmt/duration.go | 180 + .../pkg/validation/strfmt/format.go | 233 + .../pkg/validation/strfmt/time.go | 172 + .../forked/golang/golang-lru/lru.go | 133 + vendor/k8s.io/utils/lru/lru.go | 87 + vendor/k8s.io/utils/path/file.go | 78 + vendor/modules.txt | 298 +- .../konnectivity-client/LICENSE | 201 + .../konnectivity-client/pkg/client/client.go | 564 ++ .../konnectivity-client/pkg/client/conn.go | 157 + .../pkg/client/metrics/metrics.go | 
 164 +
 .../pkg/common/metrics/metrics.go | 78 +
 .../proto/client/client.pb.go | 893 ++
 .../proto/client/client.proto | 104 +
 .../proto/client/client_grpc.pb.go | 150 +
 .../pkg/metrics/filters/filters.go | 122 +
 vendor/sigs.k8s.io/json/Makefile | 2 +-
 vendor/sigs.k8s.io/json/OWNERS | 2 +-
 .../internal/golang/encoding/json/decode.go | 140 +-
 .../internal/golang/encoding/json/encode.go | 490 +-
 .../internal/golang/encoding/json/fold.go | 150 +-
 .../internal/golang/encoding/json/indent.go | 119 +-
 .../internal/golang/encoding/json/scanner.go | 4 +-
 .../internal/golang/encoding/json/stream.go | 41 +-
 1107 files changed, 212547 insertions(+), 13746 deletions(-)
 delete mode 100644 deployments/helm/k8s-nim-operator/templates/proxy-rbac.yaml
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/.gitignore
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/LICENSE
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/README.md
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_state.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/atn_type.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/char_stream.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/comparators.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/configuration.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/dfa.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/dfa_state.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/error_listener.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/error_strategy.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/errors.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/file_stream.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/input_stream.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/int_stream.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/interval_set.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/jcollect.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/lexer.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/lexer_action.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/mutex.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/nostatistics.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/parser.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/recognizer.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/rule_context.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/semantic_context.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/statistics.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/stats_data.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/token.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/token_source.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/token_stream.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/trace_listener.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/transition.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/tree.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/trees.go
 create mode 100644 vendor/github.com/antlr4-go/antlr/v4/utils.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/.gitignore
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/LICENSE
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/README.md
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/backoff.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/context.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/exponential.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/retry.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/ticker.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/timer.go
 create mode 100644 vendor/github.com/cenkalti/backoff/v4/tries.go
 create mode 100644 vendor/github.com/go-openapi/swag/BENCHMARK.md
 create mode 100644 vendor/github.com/go-openapi/swag/string_bytes.go
 create mode 100644 vendor/github.com/google/cel-go/LICENSE
 create mode 100644 vendor/github.com/google/cel-go/cel/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/cel/cel.go
 create mode 100644 vendor/github.com/google/cel-go/cel/decls.go
 create mode 100644 vendor/github.com/google/cel-go/cel/env.go
 create mode 100644 vendor/github.com/google/cel-go/cel/folding.go
 create mode 100644 vendor/github.com/google/cel-go/cel/inlining.go
 create mode 100644 vendor/github.com/google/cel-go/cel/io.go
 create mode 100644 vendor/github.com/google/cel-go/cel/library.go
 create mode 100644 vendor/github.com/google/cel-go/cel/macro.go
 create mode 100644 vendor/github.com/google/cel-go/cel/optimizer.go
 create mode 100644 vendor/github.com/google/cel-go/cel/options.go
 create mode 100644 vendor/github.com/google/cel-go/cel/program.go
 create mode 100644 vendor/github.com/google/cel-go/cel/validator.go
 create mode 100644 vendor/github.com/google/cel-go/checker/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/checker/checker.go
 create mode 100644 vendor/github.com/google/cel-go/checker/cost.go
 create mode 100644 vendor/github.com/google/cel-go/checker/decls/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/checker/decls/decls.go
 create mode 100644 vendor/github.com/google/cel-go/checker/env.go
 create mode 100644 vendor/github.com/google/cel-go/checker/errors.go
 create mode 100644 vendor/github.com/google/cel-go/checker/format.go
 create mode 100644 vendor/github.com/google/cel-go/checker/mapping.go
 create mode 100644 vendor/github.com/google/cel-go/checker/options.go
 create mode 100644 vendor/github.com/google/cel-go/checker/printer.go
 create mode 100644 vendor/github.com/google/cel-go/checker/scopes.go
 create mode 100644 vendor/github.com/google/cel-go/checker/types.go
 create mode 100644 vendor/github.com/google/cel-go/common/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/ast/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/ast/ast.go
 create mode 100644 vendor/github.com/google/cel-go/common/ast/conversion.go
 create mode 100644 vendor/github.com/google/cel-go/common/ast/expr.go
 create mode 100644 vendor/github.com/google/cel-go/common/ast/factory.go
 create mode 100644 vendor/github.com/google/cel-go/common/ast/navigable.go
 create mode 100644 vendor/github.com/google/cel-go/common/containers/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/containers/container.go
 create mode 100644 vendor/github.com/google/cel-go/common/cost.go
 create mode 100644 vendor/github.com/google/cel-go/common/debug/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/debug/debug.go
 create mode 100644 vendor/github.com/google/cel-go/common/decls/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/decls/decls.go
 create mode 100644 vendor/github.com/google/cel-go/common/doc.go
 create mode 100644 vendor/github.com/google/cel-go/common/error.go
 create mode 100644 vendor/github.com/google/cel-go/common/errors.go
 create mode 100644 vendor/github.com/google/cel-go/common/functions/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/functions/functions.go
 create mode 100644 vendor/github.com/google/cel-go/common/location.go
 create mode 100644 vendor/github.com/google/cel-go/common/operators/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/operators/operators.go
 create mode 100644 vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/overloads/overloads.go
 create mode 100644 vendor/github.com/google/cel-go/common/runes/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/runes/buffer.go
 create mode 100644 vendor/github.com/google/cel-go/common/source.go
 create mode 100644 vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/stdlib/standard.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/types/any_value.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/bool.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/bytes.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/compare.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/doc.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/double.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/duration.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/err.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/int.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/iterator.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/json_value.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/list.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/map.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/null.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/object.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/optional.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/overflow.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/types/pb/checked.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/pb/enum.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/pb/equal.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/pb/file.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/pb/pb.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/pb/type.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/provider.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/types/ref/provider.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/ref/reference.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/string.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/timestamp.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/comparer.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/container.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/field_tester.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/indexer.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/iterator.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/lister.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/mapper.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/matcher.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/math.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/receiver.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/sizer.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/traits.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/traits/zeroer.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/types.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/uint.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/unknown.go
 create mode 100644 vendor/github.com/google/cel-go/common/types/util.go
 create mode 100644 vendor/github.com/google/cel-go/ext/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/ext/README.md
 create mode 100644 vendor/github.com/google/cel-go/ext/bindings.go
 create mode 100644 vendor/github.com/google/cel-go/ext/encoders.go
 create mode 100644 vendor/github.com/google/cel-go/ext/formatting.go
 create mode 100644 vendor/github.com/google/cel-go/ext/guards.go
 create mode 100644 vendor/github.com/google/cel-go/ext/lists.go
 create mode 100644 vendor/github.com/google/cel-go/ext/math.go
 create mode 100644 vendor/github.com/google/cel-go/ext/native.go
 create mode 100644 vendor/github.com/google/cel-go/ext/protos.go
 create mode 100644 vendor/github.com/google/cel-go/ext/sets.go
 create mode 100644 vendor/github.com/google/cel-go/ext/strings.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/interpreter/activation.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/attributes.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/decorators.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/dispatcher.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/evalstate.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/interpreter/functions/functions.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/interpretable.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/interpreter.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/optimizations.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/planner.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/prune.go
 create mode 100644 vendor/github.com/google/cel-go/interpreter/runtimecost.go
 create mode 100644 vendor/github.com/google/cel-go/parser/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/parser/errors.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/CEL.g4
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/CEL.interp
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/CEL.tokens
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/CELLexer.interp
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_listener.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_parser.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/doc.go
 create mode 100644 vendor/github.com/google/cel-go/parser/gen/generate.sh
 create mode 100644 vendor/github.com/google/cel-go/parser/helper.go
 create mode 100644 vendor/github.com/google/cel-go/parser/input.go
 create mode 100644 vendor/github.com/google/cel-go/parser/macro.go
 create mode 100644 vendor/github.com/google/cel-go/parser/options.go
 create mode 100644 vendor/github.com/google/cel-go/parser/parser.go
 create mode 100644 vendor/github.com/google/cel-go/parser/unescape.go
 create mode 100644 vendor/github.com/google/cel-go/parser/unparser.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
 create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
 create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
 create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
 create mode 100644 vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
 delete mode 100644 vendor/github.com/prometheus/common/model/labelset_string_go120.go
 create mode 100644 vendor/github.com/stoewer/go-strcase/.gitignore
 create mode 100644 vendor/github.com/stoewer/go-strcase/.golangci.yml
 create mode 100644 vendor/github.com/stoewer/go-strcase/LICENSE
 create mode 100644 vendor/github.com/stoewer/go-strcase/README.md
 create mode 100644 vendor/github.com/stoewer/go-strcase/camel.go
 create mode 100644 vendor/github.com/stoewer/go-strcase/doc.go
 create mode 100644 vendor/github.com/stoewer/go-strcase/helper.go
 create mode 100644 vendor/github.com/stoewer/go-strcase/kebab.go
 create mode 100644 vendor/github.com/stoewer/go-strcase/snake.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go
 create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go
 rename vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/{v1.24.0.go => httpconv.go} (57%)
 delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
 create mode 100644 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/metric/noop/noop.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/config.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/container.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/env.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/process.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/event.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/link.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/trace/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/sdk/version.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.17.0}/event.go (63%)
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.17.0}/resource.go (66%)
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/doc.go (96%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/exception.go (98%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/metric.go (77%)
 rename vendor/go.opentelemetry.io/otel/semconv/{v1.24.0 => v1.26.0}/schema.go (85%)
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/README.md
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/noop/noop.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go
 create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go
 delete mode 100644 vendor/go.opentelemetry.io/otel/verify_examples.sh
 create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
 create mode 100644 vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
 create mode 100644 vendor/golang.org/x/exp/constraints/constraints.go
 create mode 100644 vendor/golang.org/x/exp/slices/cmp.go
 create mode 100644 vendor/golang.org/x/exp/slices/slices.go
 create mode 100644 vendor/golang.org/x/exp/slices/sort.go
 create mode 100644 vendor/golang.org/x/exp/slices/zsortanyfunc.go
 create mode 100644 vendor/golang.org/x/exp/slices/zsortordered.go
 create mode 100644 vendor/golang.org/x/net/http2/config.go
 create mode 100644 vendor/golang.org/x/net/http2/config_go124.go
 create mode 100644 vendor/golang.org/x/net/http2/config_pre_go124.go
 create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go
 create mode 100644 vendor/golang.org/x/net/trace/events.go
 create mode 100644 vendor/golang.org/x/net/trace/histogram.go
 create mode 100644 vendor/golang.org/x/net/trace/trace.go
 create mode 100644 vendor/golang.org/x/sync/singleflight/singleflight.go
 create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go
 create mode 100644 vendor/golang.org/x/sys/unix/vgetrandom_linux.go
 rename vendor/golang.org/x/{tools/internal/versions/toolchain_go121.go => sys/unix/vgetrandom_unsupported.go} (56%)
 create mode 100644 vendor/golang.org/x/sys/windows/registry/key.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/mksyscall.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/syscall.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/value.go
 create mode 100644 vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
 create mode 100644 vendor/golang.org/x/text/feature/plural/common.go
 create mode 100644 vendor/golang.org/x/text/feature/plural/message.go
 create mode 100644 vendor/golang.org/x/text/feature/plural/plural.go
 create mode 100644 vendor/golang.org/x/text/feature/plural/tables.go
 create mode 100644 vendor/golang.org/x/text/internal/catmsg/catmsg.go
 create mode 100644 vendor/golang.org/x/text/internal/catmsg/codec.go
 create mode 100644 vendor/golang.org/x/text/internal/catmsg/varint.go
 create mode 100644 vendor/golang.org/x/text/internal/format/format.go
 create mode 100644 vendor/golang.org/x/text/internal/format/parser.go
 create mode 100644 vendor/golang.org/x/text/internal/internal.go
 create mode 100644 vendor/golang.org/x/text/internal/match.go
 create mode 100644 vendor/golang.org/x/text/internal/number/common.go
 create mode 100644 vendor/golang.org/x/text/internal/number/decimal.go
 create mode 100644 vendor/golang.org/x/text/internal/number/format.go
 create mode 100644 vendor/golang.org/x/text/internal/number/number.go
 create mode 100644 vendor/golang.org/x/text/internal/number/pattern.go
 create mode 100644 vendor/golang.org/x/text/internal/number/roundingmode_string.go
 create mode 100644 vendor/golang.org/x/text/internal/number/tables.go
 create mode 100644 vendor/golang.org/x/text/internal/stringset/set.go
 create mode 100644 vendor/golang.org/x/text/message/catalog.go
 create mode 100644 vendor/golang.org/x/text/message/catalog/catalog.go
 create mode 100644 vendor/golang.org/x/text/message/catalog/dict.go
 create mode 100644 vendor/golang.org/x/text/message/catalog/go19.go
 create mode 100644 vendor/golang.org/x/text/message/catalog/gopre19.go
 create mode 100644 vendor/golang.org/x/text/message/doc.go
 create mode 100644 vendor/golang.org/x/text/message/format.go
 create mode 100644 vendor/golang.org/x/text/message/message.go
 create mode 100644 vendor/golang.org/x/text/message/print.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/iter.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/callee.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/imports.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/map.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
 create mode 100644 vendor/golang.org/x/tools/go/types/typeutil/ui.go
 delete mode 100644 vendor/golang.org/x/tools/internal/aliases/aliases_go121.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go
 create mode 100644 vendor/golang.org/x/tools/internal/gcimporter/predeclared.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/support_go118.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_no.go
 delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go
 delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go
 delete mode 100644 vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go
 create mode 100644 vendor/golang.org/x/tools/internal/pkgbits/version.go
 delete mode 100644 vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/common.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/coretype.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/free.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/normalize.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/termlist.go
 create mode 100644 vendor/golang.org/x/tools/internal/typeparams/typeterm.go
 create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/element.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go119.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/toolchain_go120.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go121.go
 delete mode 100644 vendor/golang.org/x/tools/internal/versions/types_go122.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/LICENSE
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
 create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
 create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
 create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md
 create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md
 create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md
 create mode 100644 vendor/google.golang.org/grpc/Makefile
 create mode 100644 vendor/google.golang.org/grpc/README.md
 create mode 100644 vendor/google.golang.org/grpc/SECURITY.md
 create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go
 create mode 100644 vendor/google.golang.org/grpc/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/base/balancer.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
 create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
 create mode 100644 vendor/google.golang.org/grpc/balancer_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
 create mode 100644 vendor/google.golang.org/grpc/call.go
 create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go
 create mode 100644 vendor/google.golang.org/grpc/clientconn.go
 create mode 100644 vendor/google.golang.org/grpc/codec.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go
 create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go
 create mode 100644 vendor/google.golang.org/grpc/dialoptions.go
 create mode 100644 vendor/google.golang.org/grpc/doc.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/encoding_v2.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/gzip/gzip.go
 create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go
 create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
 create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metrics.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
 create mode 100644 vendor/google.golang.org/grpc/grpclog/internal/logger.go
 rename vendor/google.golang.org/grpc/{internal/grpclog/grpclog.go => grpclog/internal/loggerv2.go} (52%)
 create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
 create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
 create mode 100644 vendor/google.golang.org/grpc/interceptor.go
 create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go
 create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
 create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
 create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
 create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go
 create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/channel.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/channelmap.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/socket.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/subchannel.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/channelz/trace.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
 create mode 100644 vendor/google.golang.org/grpc/internal/credentials/util.go
 create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
 create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go
 create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go
 rename vendor/google.golang.org/grpc/internal/grpclog/{prefixLogger.go => prefix_logger.go} (63%)
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go
 create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go
 create mode 100644 vendor/google.golang.org/grpc/internal/idle/idle.go
 create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
 create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
 create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
 create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
 create mode 100644 vendor/google.golang.org/grpc/internal/stats/labels.go
 create mode 100644 vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
 create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/proxy.go
 create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go
 create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go
 create mode 100644 vendor/google.golang.org/grpc/mem/buffer_pool.go
 create mode 100644 vendor/google.golang.org/grpc/mem/buffer_slice.go
 create mode 100644 vendor/google.golang.org/grpc/mem/buffers.go
 create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go
 create mode 100644 vendor/google.golang.org/grpc/peer/peer.go
 create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/preloader.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/map.go
 create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go
 create mode 100644 vendor/google.golang.org/grpc/resolver_wrapper.go
 create mode 100644 vendor/google.golang.org/grpc/rpc_util.go
 create mode 100644 vendor/google.golang.org/grpc/server.go
 create mode 100644 vendor/google.golang.org/grpc/service_config.go
 create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go
 create mode 100644 vendor/google.golang.org/grpc/stats/stats.go
 create mode 100644 vendor/google.golang.org/grpc/stream.go
 create mode 100644 vendor/google.golang.org/grpc/stream_interfaces.go
 create mode 100644 vendor/google.golang.org/grpc/tap/tap.go
 create mode 100644 vendor/google.golang.org/grpc/trace.go
 create mode 100644 vendor/google.golang.org/grpc/trace_notrace.go
 create mode 100644 vendor/google.golang.org/grpc/trace_withtrace.go
 create mode 100644 vendor/google.golang.org/grpc/version.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
 create mode 100644 vendor/google.golang.org/protobuf/internal/impl/equal.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
 delete mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
 delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
 create mode 100644 vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
 create mode 100644 vendor/google.golang.org/protobuf/types/dynamicpb/types.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
 create mode 100644 vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/types_encryption.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types_encryption.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/defaults.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_encryption.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/register.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/apis/cel/config.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/OWNERS
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/context.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/evaluator.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/format.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/request.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/scheme.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/audit/union.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/cel/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/OWNERS
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/cidr.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/environment/base.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/errors.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/escaping.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/format.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/ip.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/authz.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/cidr.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/cost.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/format.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/ip.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/lists.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/regex.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/test.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/library/urls.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/limits.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/quantity.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/types.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/url.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/cel/value.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS
 create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/context.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/features/OWNERS
 create mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/interfaces.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/egressselector/config.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/version/registry.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/version/version.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/client.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/error.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/validation.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go
 create mode 100644 vendor/k8s.io/apiserver/pkg/warning/context.go
 create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go
 create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh
 create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics/metrics.go
 create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go
 create mode 100644 vendor/k8s.io/client-go/tools/events/OWNERS
 create mode 100644 vendor/k8s.io/client-go/tools/events/doc.go
 create mode 100644 vendor/k8s.io/client-go/tools/events/event_broadcaster.go
 create mode 100644 vendor/k8s.io/client-go/tools/events/event_recorder.go
 create mode 100644 vendor/k8s.io/client-go/tools/events/fake.go
 create mode 100644 vendor/k8s.io/client-go/tools/events/helper.go
 create mode 100644 vendor/k8s.io/client-go/tools/events/interfaces.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/configuration_map.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/flags.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/map_string_bool.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/map_string_string.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/namedcertkey_flag.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/noop.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/omitempty.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/sectioned.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/string_flag.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/string_slice_flag.go
 create mode 100644 vendor/k8s.io/component-base/cli/flag/tristate.go
 create mode 100644 vendor/k8s.io/component-base/featuregate/OWNERS
 create mode 100644 vendor/k8s.io/component-base/featuregate/feature_gate.go
 create mode 100644 vendor/k8s.io/component-base/metrics/OWNERS
 create mode 100644 vendor/k8s.io/component-base/metrics/buckets.go
 create mode 100644 vendor/k8s.io/component-base/metrics/collector.go
 create mode 100644 vendor/k8s.io/component-base/metrics/counter.go
 create mode 100644 vendor/k8s.io/component-base/metrics/desc.go
 create mode 100644 vendor/k8s.io/component-base/metrics/gauge.go
 create mode 100644 vendor/k8s.io/component-base/metrics/histogram.go
 create mode 100644 vendor/k8s.io/component-base/metrics/http.go
 create mode 100644 vendor/k8s.io/component-base/metrics/labels.go
 create mode 100644 vendor/k8s.io/component-base/metrics/legacyregistry/registry.go
 create mode 100644 vendor/k8s.io/component-base/metrics/metric.go
 create mode 100644 vendor/k8s.io/component-base/metrics/options.go
 create mode 100644 vendor/k8s.io/component-base/metrics/opts.go
 create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime.go
 create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime_others.go
 create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime_windows.go
 create mode 100644 vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go
 create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go
 create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go
 create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go
 create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go
 create mode 100644 vendor/k8s.io/component-base/metrics/registry.go
 create mode 100644 vendor/k8s.io/component-base/metrics/summary.go
 create mode 100644 vendor/k8s.io/component-base/metrics/timing_histogram.go
 create mode 100644 vendor/k8s.io/component-base/metrics/value.go
 create mode 100644 vendor/k8s.io/component-base/metrics/version.go
 create mode 100644 vendor/k8s.io/component-base/metrics/version_parser.go
 create mode 100644 vendor/k8s.io/component-base/metrics/wrappers.go
 create mode 100644 vendor/k8s.io/component-base/tracing/OWNERS
 create mode 100644 vendor/k8s.io/component-base/tracing/api/v1/config.go
 create mode 100644 vendor/k8s.io/component-base/tracing/api/v1/doc.go
 create mode 100644 vendor/k8s.io/component-base/tracing/api/v1/types.go
 create mode 100644 vendor/k8s.io/component-base/tracing/api/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/k8s.io/component-base/tracing/tracing.go
 create mode 100644 vendor/k8s.io/component-base/tracing/utils.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/errors/.gitignore
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/errors/LICENSE
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/errors/api.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/errors/doc.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/errors/headers.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/errors/schema.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/.gitignore
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/LICENSE
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson/objectid.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/date.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/doc.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/duration.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/format.go
 create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/strfmt/time.go
 create mode 100644 vendor/k8s.io/utils/internal/third_party/forked/golang/golang-lru/lru.go
 create mode 100644 vendor/k8s.io/utils/lru/lru.go
 create mode 100644 vendor/k8s.io/utils/path/file.go
 create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/LICENSE
 create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/client.go
 create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/conn.go
 create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics/metrics.go
 create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics/metrics.go
 create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.pb.go
 create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client.proto
create mode 100644 vendor/sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client/client_grpc.pb.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/metrics/filters/filters.go diff --git a/cmd/main.go b/cmd/main.go index 700cf6f3..fb086856 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -31,6 +31,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -118,9 +119,10 @@ func main() { mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, Metrics: metricsserver.Options{ - BindAddress: metricsAddr, - SecureServing: secureMetrics, - TLSOpts: tlsOpts, + BindAddress: metricsAddr, + SecureServing: secureMetrics, + TLSOpts: tlsOpts, + FilterProvider: filters.WithAuthenticationAndAuthorization, }, WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, diff --git a/deployments/helm/k8s-nim-operator/templates/deployment.yaml b/deployments/helm/k8s-nim-operator/templates/deployment.yaml index 46d56671..393cb684 100644 --- a/deployments/helm/k8s-nim-operator/templates/deployment.yaml +++ b/deployments/helm/k8s-nim-operator/templates/deployment.yaml @@ -23,32 +23,18 @@ spec: kubectl.kubernetes.io/default-container: manager spec: containers: - - args: {{- toYaml .Values.kubeProxy.args | nindent 8 }} - image: {{ .Values.kubeProxy.image.repository }}:{{ .Values.kubeProxy.image.tag }} - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: {{- toYaml .Values.kubeProxy.resources | nindent - 10 }} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - args: {{- toYaml .Values.operator.args | nindent 8 }} command: - /manager image: {{ include "k8s-nim-operator.fullimage" . }} imagePullPolicy: {{ .Values.operator.image.pullPolicy }} env: - - name: WATCH_NAMESPACE - value: "" - - name: OPERATOR_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + - name: WATCH_NAMESPACE + value: "" + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace livenessProbe: httpGet: path: /healthz diff --git a/deployments/helm/k8s-nim-operator/templates/manager-rbac.yaml b/deployments/helm/k8s-nim-operator/templates/manager-rbac.yaml index 99f6a95d..38d92220 100644 --- a/deployments/helm/k8s-nim-operator/templates/manager-rbac.yaml +++ b/deployments/helm/k8s-nim-operator/templates/manager-rbac.yaml @@ -282,6 +282,19 @@ rules: - get - list - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/deployments/helm/k8s-nim-operator/templates/proxy-rbac.yaml b/deployments/helm/k8s-nim-operator/templates/proxy-rbac.yaml deleted file mode 100644 index ab873d43..00000000 --- a/deployments/helm/k8s-nim-operator/templates/proxy-rbac.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: k8s-nim-operator-proxy-role - labels: - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-nim-operator - app.kubernetes.io/part-of: k8s-nim-operator - {{- include "k8s-nim-operator.labels" . 
| nindent 4 }} -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: k8s-nim-operator-proxy-rolebinding - labels: - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: k8s-nim-operator - app.kubernetes.io/part-of: k8s-nim-operator - {{- include "k8s-nim-operator.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-nim-operator-proxy-role -subjects: -- kind: ServiceAccount - name: k8s-nim-operator - namespace: {{ .Release.Namespace }} diff --git a/deployments/helm/k8s-nim-operator/values.yaml b/deployments/helm/k8s-nim-operator/values.yaml index 55036fef..01edceb3 100644 --- a/deployments/helm/k8s-nim-operator/values.yaml +++ b/deployments/helm/k8s-nim-operator/values.yaml @@ -7,7 +7,7 @@ operator: pullPolicy: Always args: - --health-probe-bind-address=:8081 - - --metrics-bind-address=127.0.0.1:8080 + - --metrics-bind-address=:8080 - --leader-elect resources: limits: @@ -43,30 +43,11 @@ operator: operator: In values: [""] -kubeProxy: - image: - repository: gcr.io/kubebuilder/kube-rbac-proxy - tag: v0.15.0 - pullPolicy: IfNotPresent - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 50m - memory: 64Mi - metricsService: ports: - - name: https - port: 8443 + - name: metrics + port: 8080 protocol: TCP - targetPort: https type: ClusterIP nfd: diff --git a/go.mod b/go.mod index c3f41ccc..c6666127 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module github.com/NVIDIA/k8s-nim-operator -go 1.22.3 +go 1.23 -toolchain go1.22.5 +toolchain go1.23.1 require ( github.com/Masterminds/sprig/v3 v3.3.0 @@ -14,7 +14,7 @@ require ( github.com/onsi/gomega v1.34.2 github.com/openshift/api v0.0.0-20240708071937-c9a91940bf0f github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1 - github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 github.com/stretchr/testify v1.9.0 gopkg.in/yaml.v2 v2.4.0 @@ -23,7 +23,7 @@ require ( k8s.io/apiextensions-apiserver v0.31.1 k8s.io/apimachinery v0.31.1 k8s.io/client-go v0.31.1 - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 sigs.k8s.io/controller-runtime v0.19.0 sigs.k8s.io/yaml v1.4.0 ) @@ -40,9 +40,11 @@ require ( github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Microsoft/hcsshim v0.11.4 // indirect github.com/ProtonMail/go-crypto v1.0.0 // indirect + github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect @@ -57,7 +59,7 @@ require ( github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-metrics v0.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.1 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect 
github.com/emirpasic/gods v1.18.1 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect @@ -72,15 +74,16 @@ require ( github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect - github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/swag v0.22.7 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/cel-go v0.21.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect @@ -91,6 +94,7 @@ require ( github.com/gorilla/websocket v1.5.1 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/huandu/xstrings v1.5.0 // indirect @@ -101,7 +105,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect - github.com/klauspost/compress v1.17.4 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.9 // indirect @@ -127,7 +131,7 @@ require ( github.com/pjbgf/sha1cd v0.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rubenv/sql-migrate v1.7.0 // indirect @@ -139,34 +143,40 @@ require ( github.com/spf13/cast v1.7.0 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect + 
go.opentelemetry.io/otel/metric v1.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.31.0 // indirect + go.opentelemetry.io/otel/trace v1.31.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/net v0.28.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/crypto v0.28.0 // indirect + golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.24.0 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/term v0.25.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect + google.golang.org/grpc v1.67.1 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect @@ -175,10 +185,11 @@ require ( k8s.io/cli-runtime v0.31.0 // indirect k8s.io/component-base v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect + k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094 // indirect k8s.io/kubectl v0.31.0 // indirect oras.land/oras-go v1.2.5 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect sigs.k8s.io/node-feature-discovery v0.15.4 // indirect diff --git a/go.sum b/go.sum index 168ed76c..8623835d 100644 --- a/go.sum +++ b/go.sum @@ -35,6 +35,8 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= 
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= @@ -54,6 +56,8 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0Bsq github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= @@ -100,8 +104,8 @@ github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arX github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/emicklei/go-restful/v3 v3.11.1 h1:S+9bSbua1z3FgCnV0KKOSSZ3mDthb5NyEPL5gEpCvyk= -github.com/emicklei/go-restful/v3 v3.11.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= @@ -146,12 +150,12 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8= -github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -173,6 +177,8 @@ github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.21.0 h1:cl6uW/gxN+Hy50tNYvI691+sXxioCnstFzLp2WO4GCI= +github.com/google/cel-go v0.21.0/go.mod h1:rHUlWCcBKgyEk+eV03RPdZUekPp6YcJwV0FxuUksYxc= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -198,6 +204,8 @@ github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -226,8 +234,8 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4 github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -237,6 +245,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder 
v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= @@ -318,16 +328,16 @@ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.76.1/g github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= @@ -336,8 +346,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rubenv/sql-migrate v1.7.0 h1:HtQq1xyTN2ISmQDggnh0c9U3JlP8apWh8YO2jzlXpTI= github.com/rubenv/sql-migrate v1.7.0/go.mod h1:S4wtDEG1CKn+0ShpTtzWhFpHHI5PvCUtiGI+C+Z2THE= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -358,8 +368,12 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag 
v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -367,6 +381,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -393,14 +410,22 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4= go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -417,16 +442,16 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= +golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -439,10 +464,10 @@ golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -472,15 +497,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= +golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -488,30 +513,32 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw= +google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= +google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -551,18 +578,20 @@ k8s.io/component-base v0.31.1 h1:UpOepcrX3rQ3ab5NB6g5iP0tvsgJWzxTyAo20sgYSy8= k8s.io/component-base v0.31.1/go.mod h1:WGeaw7t/kTsqpVTaCoVEtillbqAhF2/JgvO0LDOMa0w= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094 h1:MErs8YA0abvOqJ8gIupA1Tz6PKXYUw34XsGlA7uSL1k= +k8s.io/kube-openapi v0.0.0-20241009091222-67ed5848f094/go.mod h1:7ioBJr1A6igWjsR2fxq2EZ0mlMwYLejazSIc2bzMp2U= k8s.io/kubectl v0.31.0 h1:kANwAAPVY02r4U4jARP/C+Q1sssCcN/1p9Nk+7BQKVg= k8s.io/kubectl v0.31.0/go.mod h1:pB47hhFypGsaHAPjlwrNbvhXgmuAr01ZBvAIIUaI8d4= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6 h1:MDF6h2H/h4tbzmtIKTuctcwZmY0tY9mD9fNT47QO6HI= +k8s.io/utils v0.0.0-20240921022957-49e7df575cb6/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZgIYqye0= sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= diff --git a/vendor/github.com/antlr4-go/antlr/v4/.gitignore b/vendor/github.com/antlr4-go/antlr/v4/.gitignore new file mode 100644 index 00000000..38ea34ff --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/.gitignore @@ -0,0 +1,18 @@ +### Go template + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + + +# Go workspace file +go.work + +# No Goland stuff in this repo +.idea diff --git a/vendor/github.com/antlr4-go/antlr/v4/LICENSE b/vendor/github.com/antlr4-go/antlr/v4/LICENSE new file mode 100644 index 00000000..a22292eb --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012-2023 The ANTLR Project. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither name of copyright holders nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/antlr4-go/antlr/v4/README.md b/vendor/github.com/antlr4-go/antlr/v4/README.md new file mode 100644 index 00000000..03e5b83e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/README.md @@ -0,0 +1,54 @@ +[![Go Report Card](https://goreportcard.com/badge/github.com/antlr4-go/antlr?style=flat-square)](https://goreportcard.com/report/github.com/antlr4-go/antlr) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/github.com/antlr4-go/antlr)](https://pkg.go.dev/github.com/antlr4-go/antlr) +[![Release](https://img.shields.io/github/v/release/antlr4-go/antlr?sort=semver&style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Release](https://img.shields.io/github/go-mod/go-version/antlr4-go/antlr?style=flat-square)](https://github.com/antlr4-go/antlr/releases/latest) +[![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg?style=flat-square)](https://github.com/antlr4-go/antlr/commit-activity) +[![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) +[![GitHub stars](https://img.shields.io/github/stars/antlr4-go/antlr?style=flat-square&label=Star&maxAge=2592000)](https://GitHub.com/Naereen/StrapDown.js/stargazers/) +# ANTLR4 Go Runtime Module Repo + +IMPORTANT: Please submit PRs via a clone of the https://github.com/antlr/antlr4 repo, and not here. + + - Do not submit PRs or any change requests to this repo + - This repo is read only and is updated by the ANTLR team to create a new release of the Go Runtime for ANTLR + - This repo contains the Go runtime that your generated projects should import + +## Introduction + +This repo contains the official modules for the Go Runtime for ANTLR. It is a copy of the runtime maintained +at: https://github.com/antlr/antlr4/tree/master/runtime/Go/antlr and is automatically updated by the ANTLR team to create +the official Go runtime release only. No development work is carried out in this repo and PRs are not accepted here. 
+
+The dev branch of this repo is kept in sync with the dev branch of the main ANTLR repo and is updated periodically.
+
+### Why?
+
+The `go get` command is unable to retrieve the Go runtime when it is embedded so
+deeply in the main repo. A `go get` against the `antlr/antlr4` repo, while retrieving the correct source code for the runtime,
+does not correctly resolve tags and will create a reference in your `go.mod` file that is unclear, will not upgrade smoothly and
+causes confusion.
+
+For instance, the current Go runtime release, which is tagged with v4.13.0 in `antlr/antlr4`, is retrieved by go get as:
+
+```sh
+require (
+    github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230219212500-1f9a474cc2dc
+)
+```
+
+Where you would expect to see:
+
+```sh
+require (
+    github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.13.0
+)
+```
+
+The decision was taken to create a separate org and a separate repo to hold the official Go runtime for ANTLR, from
+which users can expect `go get` to behave as expected.
+
+
+# Documentation
+Please read the official documentation at: https://github.com/antlr/antlr4/blob/master/doc/index.md for tips on
+migrating existing projects to use the new module location and for information on how to use the Go runtime in
+general.
diff --git a/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
new file mode 100644
index 00000000..48bd362b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/antlrdoc.go
@@ -0,0 +1,102 @@
+/*
+Package antlr implements the Go version of the ANTLR 4 runtime.
+
+# The ANTLR Tool
+
+ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing,
+or translating structured text or binary files. It's widely used to build languages, tools, and frameworks.
+From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface
+(or visitor) that makes it easy to respond to the recognition of phrases of interest.
+
+# Go Runtime
+
+At version 4.11.x and prior, the Go runtime was not properly versioned for go modules. After this point, the runtime
+source code to be imported was held in the `runtime/Go/antlr/v4` directory, and the go.mod file was updated to reflect the version of
+ANTLR4 that it is compatible with (i.e. uses the /v4 path).
+
+However, this was found to be problematic, as it meant that with the runtime embedded so far underneath the root
+of the repo, the `go get` and related commands could not properly resolve the location of the go runtime source code.
+This meant that the reference to the runtime in your `go.mod` file would refer to the correct source code, but would not
+list the release tag such as @4.13.1 - this was confusing, to say the least.
+
+As of 4.13.0, the runtime is now available as a go module in its own repo, and can be imported as `github.com/antlr4-go/antlr`
+(the go get command should also be used with this path). See the main documentation for the ANTLR4 project for more information,
+which is available at [ANTLR docs]. The documentation for using the Go runtime is available at [Go runtime docs].
+
+This means that if you are using the source code without modules, you should also use the source code in the [new repo],
+though we highly recommend that you use go modules, as they are now idiomatic for Go.
+
+I am aware that this change will prove Hyrum's Law, but am prepared to live with it for the common good.
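To make the relocation above concrete, here is a minimal sketch of a consumer of the new module path. It assumes `go get github.com/antlr4-go/antlr/v4` has resolved to a tagged release (such as v4.13.1, the version vendored by this patch); the program itself is illustrative only:

```go
// Minimal sketch: confirm the relocated /v4 module path imports cleanly.
package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	// NewInputStream and Size are existing public runtime APIs; nothing
	// here is specific to this patch beyond the new import path.
	in := antlr.NewInputStream("2 + 2")
	fmt.Println(in.Size()) // prints 5
}
```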
+
+Go runtime author: [Jim Idle] jimi@idle.ws
+
+# Code Generation
+
+ANTLR supports the generation of code in a number of [target languages], and the generated code is supported by a
+runtime library, written specifically to support the generated code in the target language. This library is the
+runtime for the Go target.
+
+To generate code for the Go target, it is generally recommended to place the source grammar files in a package of
+their own, and use the `.sh` script method of generating code, using the go generate directive. In that same directory
+it is usual, though not required, to place the antlr tool that should be used to generate the code. That does mean
+that the antlr tool JAR file will be checked in to your source code control though, so you are, of course, free to use any other
+way of specifying the version of the ANTLR tool to use, such as aliasing in `.zshrc` or equivalent, or a profile in
+your IDE, or configuration in your CI system. Checking in the jar does mean that it is easy to reproduce the build as
+it was at any point in its history.
+
+Here is a general/recommended template for an ANTLR-based recognizer in Go:
+
+	.
+	├── parser
+	│     ├── mygrammar.g4
+	│     ├── antlr-4.13.1-complete.jar
+	│     ├── generate.go
+	│     └── generate.sh
+	├── parsing - generated code goes here
+	│     └── error_listeners.go
+	├── go.mod
+	├── go.sum
+	├── main.go
+	└── main_test.go
+
+Make sure that the package statement in your grammar file(s) reflects the go package the generated code will exist in.
+
+The generate.go file then looks like this:
+
+	package parser
+
+	//go:generate ./generate.sh
+
+And the generate.sh file will look similar to this:
+
+	#!/bin/sh
+
+	alias antlr4='java -Xmx500M -cp "./antlr4-4.13.1-complete.jar:$CLASSPATH" org.antlr.v4.Tool'
+	antlr4 -Dlanguage=Go -no-visitor -package parsing *.g4
+
+depending on whether you want visitors or listeners or any other ANTLR options. Note that another option here
+is to generate the code into a separate package or directory of your choosing.
+
+From the command line at the root of your source package (location of go.mod) you can then simply issue the command:
+
+	go generate ./...
+
+Which will generate the code for the parser, and place it in the parsing package. You can then use the generated code
+by importing the parsing package.
+
+There are no hard and fast rules on this. It is just a recommendation. You can generate the code in any way and to anywhere you like.
+
+# Copyright Notice
+
+Copyright (c) 2012-2023 The ANTLR Project. All rights reserved.
+
+Use of this file is governed by the BSD 3-clause license, which can be found in the [LICENSE.txt] file in the project root.
+
+[target languages]: https://github.com/antlr/antlr4/tree/master/runtime
+[LICENSE.txt]: https://github.com/antlr/antlr4/blob/master/LICENSE.txt
+[ANTLR docs]: https://github.com/antlr/antlr4/blob/master/doc/index.md
+[new repo]: https://github.com/antlr4-go/antlr
+[Jim Idle]: https://github.com/jimidle
+[Go runtime docs]: https://github.com/antlr/antlr4/blob/master/doc/go-target.md
+*/
+package antlr
diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn.go b/vendor/github.com/antlr4-go/antlr/v4/atn.go
new file mode 100644
index 00000000..e749ebd0
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/atn.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// ATNInvalidAltNumber is used to represent an ALT number that has yet to be calculated or
+// which is invalid for a particular struct such as [*antlr.BaseRuleContext]
+var ATNInvalidAltNumber int
+
+// ATN represents an “[Augmented Transition Network]”, though in ANTLR the term
+// “Augmented Recursive Transition Network” is generally used; some descriptions of a
+// “[Recursive Transition Network]” are also in existence.
+//
+// ATNs represent the main networks in the system and are serialized by the code generator and support [ALL(*)].
+//
+// [Augmented Transition Network]: https://en.wikipedia.org/wiki/Augmented_transition_network
+// [ALL(*)]: https://www.antlr.org/papers/allstar-techreport.pdf
+// [Recursive Transition Network]: https://en.wikipedia.org/wiki/Recursive_transition_network
+type ATN struct {
+
+	// DecisionToState lists the decision points for all rules, sub-rules, optional
+	// blocks, ()+, ()*, etc. Each sub-rule/rule is a decision point, and we must track them, so we
+	// can go back later and build DFA predictors for them. This includes
+	// all the rules, sub-rules, optional blocks, ()+, ()* etc...
+	DecisionToState []DecisionState
+
+	// grammarType is the ATN type and is used for deserializing ATNs from strings.
+	grammarType int
+
+	// lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+	lexerActions []LexerAction
+
+	// maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+	maxTokenType int
+
+	modeNameToStartState map[string]*TokensStartState
+
+	modeToStartState []*TokensStartState
+
+	// ruleToStartState maps from rule index to starting state number.
+	ruleToStartState []*RuleStartState
+
+	// ruleToStopState maps from rule index to stop state number.
+	ruleToStopState []*RuleStopState
+
+	// ruleToTokenType maps the rule index to the resulting token type for lexer
+	// ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+	// type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+	// specified, and otherwise is nil.
+	ruleToTokenType []int
+
+	// states is a list of all states in the ATN, ordered by state number.
+	//
+	states []ATNState
+
+	mu      Mutex
+	stateMu RWMutex
+	edgeMu  RWMutex
+}
+
+// NewATN returns a new ATN struct representing the given grammarType and is used
+// for runtime deserialization of ATNs from the code generated by the ANTLR tool.
+func NewATN(grammarType int, maxTokenType int) *ATN {
+	return &ATN{
+		grammarType:          grammarType,
+		maxTokenType:         maxTokenType,
+		modeNameToStartState: make(map[string]*TokensStartState),
+	}
+}
+
+// NextTokensInContext computes and returns the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+	return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes and returns the set of valid tokens that can occur starting
+// in state s while staying in the same rule. [antlr.Token.EPSILON] is in the set if we
+// reach the end of the rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { + a.mu.Lock() + defer a.mu.Unlock() + iset := s.GetNextTokenWithinRule() + if iset == nil { + iset = a.NextTokensInContext(s, nil) + iset.readOnly = true + s.SetNextTokenWithinRule(iset) + } + return iset +} + +// NextTokens computes and returns the set of valid tokens starting in state s, by +// calling either [NextTokensNoContext] (ctx == nil) or [NextTokensInContext] (ctx != nil). +func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { + if ctx == nil { + return a.NextTokensNoContext(s) + } + + return a.NextTokensInContext(s, ctx) +} + +func (a *ATN) addState(state ATNState) { + if state != nil { + state.SetATN(a) + state.SetStateNumber(len(a.states)) + } + + a.states = append(a.states, state) +} + +func (a *ATN) removeState(state ATNState) { + a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice +} + +func (a *ATN) defineDecisionState(s DecisionState) int { + a.DecisionToState = append(a.DecisionToState, s) + s.setDecision(len(a.DecisionToState) - 1) + + return s.getDecision() +} + +func (a *ATN) getDecisionState(decision int) DecisionState { + if len(a.DecisionToState) == 0 { + return nil + } + + return a.DecisionToState[decision] +} + +// getExpectedTokens computes the set of input symbols which could follow ATN +// state number stateNumber in the specified full parse context ctx and returns +// the set of potentially valid input symbols which could follow the specified +// state in the specified context. This method considers the complete parser +// context, but does not evaluate semantic predicates (i.e. all predicates +// encountered during the calculation are assumed true). If a path in the ATN +// exists from the starting state to the RuleStopState of the outermost context +// without Matching any symbols, Token.EOF is added to the returned set. +// +// A nil ctx defaults to ParserRuleContext.EMPTY. +// +// It panics if the ATN does not contain state stateNumber. +func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { + if stateNumber < 0 || stateNumber >= len(a.states) { + panic("Invalid state number.") + } + + s := a.states[stateNumber] + following := a.NextTokens(s, nil) + + if !following.contains(TokenEpsilon) { + return following + } + + expected := NewIntervalSet() + + expected.addSet(following) + expected.removeOne(TokenEpsilon) + + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := a.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + + following = a.NextTokens(rt.(*RuleTransition).followState, nil) + expected.addSet(following) + expected.removeOne(TokenEpsilon) + ctx = ctx.GetParent().(RuleContext) + } + + if following.contains(TokenEpsilon) { + expected.addOne(TokenEOF) + } + + return expected +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go new file mode 100644 index 00000000..267308bb --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config.go @@ -0,0 +1,332 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+
+package antlr
+
+import (
+	"fmt"
+)
+
+const (
+	lexerConfig  = iota // Indicates that this ATNConfig is for a lexer
+	parserConfig        // Indicates that this ATNConfig is for a parser
+)
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive in the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig struct {
+	precedenceFilterSuppressed     bool
+	state                          ATNState
+	alt                            int
+	context                        *PredictionContext
+	semanticContext                SemanticContext
+	reachesIntoOuterContext        int
+	cType                          int // lexerConfig or parserConfig
+	lexerActionExecutor            *LexerActionExecutor
+	passedThroughNonGreedyDecision bool
+}
+
+// NewATNConfig6 creates a new ATNConfig instance given a state, alt and context only
+func NewATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig {
+	return NewATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+// NewATNConfig5 creates a new ATNConfig instance given a state, alt, context and semantic context
+func NewATNConfig5(state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) *ATNConfig {
+	if semanticContext == nil {
+		panic("semanticContext cannot be nil") // TODO: Necessary?
+	}
+
+	pac := &ATNConfig{}
+	pac.state = state
+	pac.alt = alt
+	pac.context = context
+	pac.semanticContext = semanticContext
+	pac.cType = parserConfig
+	return pac
+}
+
+// NewATNConfig4 creates a new ATNConfig instance given an existing config, and a state only
+func NewATNConfig4(c *ATNConfig, state ATNState) *ATNConfig {
+	return NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+// NewATNConfig3 creates a new ATNConfig instance given an existing config, a state and a semantic context
+func NewATNConfig3(c *ATNConfig, state ATNState, semanticContext SemanticContext) *ATNConfig {
+	return NewATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+// NewATNConfig2 creates a new ATNConfig instance given an existing config and a semantic context only
+func NewATNConfig2(c *ATNConfig, semanticContext SemanticContext) *ATNConfig {
+	return NewATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+// NewATNConfig1 creates a new ATNConfig instance given an existing config, a state, and a context only
+func NewATNConfig1(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig {
+	return NewATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+// NewATNConfig creates a new ATNConfig instance given an existing config, a state, a context, and a semantic
+// context; the other 'constructors' are just wrappers around this one.
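+//
+// For illustration (not upstream documentation), the wrappers expand roughly as:
+//
+//	NewATNConfig4(c, state)      // NewATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+//	NewATNConfig1(c, state, ctx) // NewATNConfig(c, state, ctx, c.GetSemanticContext())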
+func NewATNConfig(c *ATNConfig, state ATNState, context *PredictionContext, semanticContext SemanticContext) *ATNConfig { + b := &ATNConfig{} + b.InitATNConfig(c, state, c.GetAlt(), context, semanticContext) + b.cType = parserConfig + return b +} + +func (a *ATNConfig) InitATNConfig(c *ATNConfig, state ATNState, alt int, context *PredictionContext, semanticContext SemanticContext) { + + a.state = state + a.alt = alt + a.context = context + a.semanticContext = semanticContext + a.reachesIntoOuterContext = c.GetReachesIntoOuterContext() + a.precedenceFilterSuppressed = c.getPrecedenceFilterSuppressed() +} + +func (a *ATNConfig) getPrecedenceFilterSuppressed() bool { + return a.precedenceFilterSuppressed +} + +func (a *ATNConfig) setPrecedenceFilterSuppressed(v bool) { + a.precedenceFilterSuppressed = v +} + +// GetState returns the ATN state associated with this configuration +func (a *ATNConfig) GetState() ATNState { + return a.state +} + +// GetAlt returns the alternative associated with this configuration +func (a *ATNConfig) GetAlt() int { + return a.alt +} + +// SetContext sets the rule invocation stack associated with this configuration +func (a *ATNConfig) SetContext(v *PredictionContext) { + a.context = v +} + +// GetContext returns the rule invocation stack associated with this configuration +func (a *ATNConfig) GetContext() *PredictionContext { + return a.context +} + +// GetSemanticContext returns the semantic context associated with this configuration +func (a *ATNConfig) GetSemanticContext() SemanticContext { + return a.semanticContext +} + +// GetReachesIntoOuterContext returns the count of references to an outer context from this configuration +func (a *ATNConfig) GetReachesIntoOuterContext() int { + return a.reachesIntoOuterContext +} + +// SetReachesIntoOuterContext sets the count of references to an outer context from this configuration +func (a *ATNConfig) SetReachesIntoOuterContext(v int) { + a.reachesIntoOuterContext = v +} + +// Equals is the default comparison function for an ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (a *ATNConfig) Equals(o Collectable[*ATNConfig]) bool { + switch a.cType { + case lexerConfig: + return a.LEquals(o) + case parserConfig: + return a.PEquals(o) + default: + panic("Invalid ATNConfig type") + } +} + +// PEquals is the default comparison function for a Parser ATNConfig when no specialist implementation is required +// for a collection. +// +// An ATN configuration is equal to another if both have the same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. 
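+//
+// Illustrative only: the fields compared are the state number, the predicted
+// alt, the prediction context, the semantic context, and the
+// precedence-filter-suppressed flag:
+//
+//	a.PEquals(b) // true only when all five match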
+func (a *ATNConfig) PEquals(o Collectable[*ATNConfig]) bool { + var other, ok = o.(*ATNConfig) + + if !ok { + return false + } + if a == other { + return true + } else if other == nil { + return false + } + + var equal bool + + if a.context == nil { + equal = other.context == nil + } else { + equal = a.context.Equals(other.context) + } + + var ( + nums = a.state.GetStateNumber() == other.state.GetStateNumber() + alts = a.alt == other.alt + cons = a.semanticContext.Equals(other.semanticContext) + sups = a.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +// Hash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) Hash() int { + switch a.cType { + case lexerConfig: + return a.LHash() + case parserConfig: + return a.PHash() + default: + panic("Invalid ATNConfig type") + } +} + +// PHash is the default hash function for a parser ATNConfig, when no specialist hash function +// is required for a collection +func (a *ATNConfig) PHash() int { + var c int + if a.context != nil { + c = a.context.Hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, a.semanticContext.Hash()) + return murmurFinish(h, 4) +} + +// String returns a string representation of the ATNConfig, usually used for debugging purposes +func (a *ATNConfig) String() string { + var s1, s2, s3 string + + if a.context != nil { + s1 = ",[" + fmt.Sprint(a.context) + "]" + } + + if a.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(a.semanticContext) + } + + if a.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(a.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", a.state, a.alt, s1, s2, s3) +} + +func NewLexerATNConfig6(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig4(c *ATNConfig, state ATNState) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig3(c *ATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), c.GetContext(), c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +func NewLexerATNConfig2(c *ATNConfig, state ATNState, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.lexerActionExecutor = c.lexerActionExecutor + lac.passedThroughNonGreedyDecision = checkNonGreedyDecision(c, state) + lac.InitATNConfig(c, state, c.GetAlt(), context, c.GetSemanticContext()) + lac.cType = lexerConfig + return lac +} + +//goland:noinspection GoUnusedExportedFunction +func NewLexerATNConfig1(state ATNState, alt int, context *PredictionContext) *ATNConfig { + lac := &ATNConfig{} + lac.state = state + lac.alt = alt + lac.context = context + lac.semanticContext = SemanticContextNone + lac.cType = lexerConfig + return lac +} + +// LHash is the default hash 
function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LHash() int { + var f int + if a.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, a.state.GetStateNumber()) + h = murmurUpdate(h, a.alt) + h = murmurUpdate(h, a.context.Hash()) + h = murmurUpdate(h, a.semanticContext.Hash()) + h = murmurUpdate(h, f) + h = murmurUpdate(h, a.lexerActionExecutor.Hash()) + h = murmurFinish(h, 6) + return h +} + +// LEquals is the default comparison function for Lexer ATNConfig objects, it can be used directly or via +// the default comparator [ObjEqComparator]. +func (a *ATNConfig) LEquals(other Collectable[*ATNConfig]) bool { + var otherT, ok = other.(*ATNConfig) + if !ok { + return false + } else if a == otherT { + return true + } else if a.passedThroughNonGreedyDecision != otherT.passedThroughNonGreedyDecision { + return false + } + + switch { + case a.lexerActionExecutor == nil && otherT.lexerActionExecutor == nil: + return true + case a.lexerActionExecutor != nil && otherT.lexerActionExecutor != nil: + if !a.lexerActionExecutor.Equals(otherT.lexerActionExecutor) { + return false + } + default: + return false // One but not both, are nil + } + + return a.PEquals(otherT) +} + +func checkNonGreedyDecision(source *ATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go new file mode 100644 index 00000000..52dbaf80 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_config_set.go @@ -0,0 +1,301 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// ATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type ATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two ATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. + configLookup *JStore[*ATNConfig, Comparator[*ATNConfig]] + + // configs is the added elements that did not match an existing key in configLookup + configs []*ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves re-computation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the ATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. It is a wildcard with SLL, but not for an LL + // context merge. + fullCtx bool + + // Used in parser and lexer. 
In lexer, it indicates we hit a pred
+	// while computing a closure operation. Don't make a DFA state from this set.
+	hasSemanticContext bool
+
+	// readOnly is whether it is read-only. Do not
+	// allow any code to manipulate the set if true because DFA states will point at
+	// sets and those must not change. If not, protect other fields; conflictingAlts
+	// in particular, which is assigned after readOnly.
+	readOnly bool
+
+	// TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+	// info together because it saves re-computation. Can we track conflicts as they
+	// are added to save scanning configs later?
+	uniqueAlt int
+}
+
+// Alts returns the combined set of alts for all the configurations in this set.
+func (b *ATNConfigSet) Alts() *BitSet {
+	alts := NewBitSet()
+	for _, it := range b.configs {
+		alts.add(it.GetAlt())
+	}
+	return alts
+}
+
+// NewATNConfigSet creates a new ATNConfigSet instance.
+func NewATNConfigSet(fullCtx bool) *ATNConfigSet {
+	return &ATNConfigSet{
+		cachedHash:   -1,
+		configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()"),
+		fullCtx:      fullCtx,
+	}
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _),
+// where 's' is the ATNConfig.state, 'i' is the ATNConfig.alt, and
+// 'pi' is the [ATNConfig].semanticContext.
+//
+// We use (s,i,pi) as the key.
+// Updates dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *ATNConfigSet) Add(config *ATNConfig, mergeCache *JPCMap) bool {
+	if b.readOnly {
+		panic("set is read-only")
+	}
+
+	if config.GetSemanticContext() != SemanticContextNone {
+		b.hasSemanticContext = true
+	}
+
+	if config.GetReachesIntoOuterContext() > 0 {
+		b.dipsIntoOuterContext = true
+	}
+
+	existing, present := b.configLookup.Put(config)
+
+	// The config was not already in the set
+	//
+	if !present {
+		b.cachedHash = -1
+		b.configs = append(b.configs, config) // Track order here
+		return true
+	}
+
+	// Merge a previous (s, i, pi, _) with it and save the result
+	rootIsWildcard := !b.fullCtx
+	merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+	// No need to check for existing.context because config.context is in the cache,
+	// since the only way to create new graphs is the "call rule" and here. We cache
+	// at both places.
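+	// Keep the larger of the two outer-context depths on the surviving config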
+	existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+	// Preserve the precedence filter suppression during the merge
+	if config.getPrecedenceFilterSuppressed() {
+		existing.setPrecedenceFilterSuppressed(true)
+	}
+
+	// Replace the context because there is no need to do alt mapping
+	existing.SetContext(merged)
+
+	return true
+}
+
+// GetStates returns the set of states represented by all configurations in this config set.
+func (b *ATNConfigSet) GetStates() *JStore[ATNState, Comparator[ATNState]] {
+
+	// states uses the standard comparator and Hash() provided by the ATNState instance
+	//
+	states := NewJStore[ATNState, Comparator[ATNState]](aStateEqInst, ATNStateCollection, "ATNConfigSet.GetStates()")
+
+	for i := 0; i < len(b.configs); i++ {
+		states.Put(b.configs[i].GetState())
+	}
+
+	return states
+}
+
+func (b *ATNConfigSet) GetPredicates() []SemanticContext {
+	predicates := make([]SemanticContext, 0)
+
+	for i := 0; i < len(b.configs); i++ {
+		c := b.configs[i].GetSemanticContext()
+
+		if c != SemanticContextNone {
+			predicates = append(predicates, c)
+		}
+	}
+
+	return predicates
+}
+
+func (b *ATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+	if b.readOnly {
+		panic("set is read-only")
+	}
+
+	// An empty lookup indicates that no optimization is possible
+	if b.configLookup == nil || b.configLookup.Len() == 0 {
+		return
+	}
+
+	for i := 0; i < len(b.configs); i++ {
+		config := b.configs[i]
+		config.SetContext(interpreter.getCachedContext(config.GetContext()))
+	}
+}
+
+func (b *ATNConfigSet) AddAll(coll []*ATNConfig) bool {
+	for i := 0; i < len(coll); i++ {
+		b.Add(coll[i], nil)
+	}
+
+	return false
+}
+
+// Compare returns true only if the configs in this set and in bs are in the same
+// order and their Equals function returns true for each pair.
+// Java uses ArrayList.equals(), which requires the same order.
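+//
+// Illustrative consequence (not stated upstream): two sets holding the same
+// configs in a different order compare as unequal:
+//
+//	a.Compare(b) // false when b holds the same configs permuted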
+func (b *ATNConfigSet) Compare(bs *ATNConfigSet) bool { + if len(b.configs) != len(bs.configs) { + return false + } + for i := 0; i < len(b.configs); i++ { + if !b.configs[i].Equals(bs.configs[i]) { + return false + } + } + + return true +} + +func (b *ATNConfigSet) Equals(other Collectable[ATNConfig]) bool { + if b == other { + return true + } else if _, ok := other.(*ATNConfigSet); !ok { + return false + } + + other2 := other.(*ATNConfigSet) + var eca bool + switch { + case b.conflictingAlts == nil && other2.conflictingAlts == nil: + eca = true + case b.conflictingAlts != nil && other2.conflictingAlts != nil: + eca = b.conflictingAlts.equals(other2.conflictingAlts) + } + return b.configs != nil && + b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + eca && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext && + b.Compare(other2) +} + +func (b *ATNConfigSet) Hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *ATNConfigSet) hashCodeConfigs() int { + h := 1 + for _, config := range b.configs { + h = 31*h + config.Hash() + } + return h +} + +func (b *ATNConfigSet) Contains(item *ATNConfig) bool { + if b.readOnly { + panic("not implemented for read-only sets") + } + if b.configLookup == nil { + return false + } + return b.configLookup.Contains(item) +} + +func (b *ATNConfigSet) ContainsFast(item *ATNConfig) bool { + return b.Contains(item) +} + +func (b *ATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + b.configs = make([]*ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfCompInst, ATNConfigLookupCollection, "NewATNConfigSet()") +} + +func (b *ATNConfigSet) String() string { + + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +// NewOrderedATNConfigSet creates a config set with a slightly different Hash/Equal pair +// for use in lexers. +func NewOrderedATNConfigSet() *ATNConfigSet { + return &ATNConfigSet{ + cachedHash: -1, + // This set uses the standard Hash() and Equals() from ATNConfig + configLookup: NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ATNConfigCollection, "ATNConfigSet.NewOrderedATNConfigSet()"), + fullCtx: false, + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go new file mode 100644 index 00000000..bdb30b36 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserialization_options.go @@ -0,0 +1,62 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import "errors" + +var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false} + +type ATNDeserializationOptions struct { + readOnly bool + verifyATN bool + generateRuleBypassTransitions bool +} + +func (opts *ATNDeserializationOptions) ReadOnly() bool { + return opts.readOnly +} + +func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) { + if opts.readOnly { + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) + } + opts.readOnly = readOnly +} + +func (opts *ATNDeserializationOptions) VerifyATN() bool { + return opts.verifyATN +} + +func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) { + if opts.readOnly { + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) + } + opts.verifyATN = verifyATN +} + +func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool { + return opts.generateRuleBypassTransitions +} + +func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) { + if opts.readOnly { + panic(errors.New("cannot mutate read only ATNDeserializationOptions")) + } + opts.generateRuleBypassTransitions = generateRuleBypassTransitions +} + +//goland:noinspection GoUnusedExportedFunction +func DefaultATNDeserializationOptions() *ATNDeserializationOptions { + return NewATNDeserializationOptions(&defaultATNDeserializationOptions) +} + +func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions { + o := new(ATNDeserializationOptions) + if other != nil { + *o = *other + o.readOnly = false + } + return o +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go new file mode 100644 index 00000000..2dcb9ae1 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_deserializer.go @@ -0,0 +1,684 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "fmt" + "strconv" +) + +const serializedVersion = 4 + +type loopEndStateIntPair struct { + item0 *LoopEndState + item1 int +} + +type blockStartStateIntPair struct { + item0 BlockStartState + item1 int +} + +type ATNDeserializer struct { + options *ATNDeserializationOptions + data []int32 + pos int +} + +func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { + if options == nil { + options = &defaultATNDeserializationOptions + } + + return &ATNDeserializer{options: options} +} + +//goland:noinspection GoUnusedFunction +func stringInSlice(a string, list []string) int { + for i, b := range list { + if b == a { + return i + } + } + + return -1 +} + +func (a *ATNDeserializer) Deserialize(data []int32) *ATN { + a.data = data + a.pos = 0 + a.checkVersion() + + atn := a.readATN() + + a.readStates(atn) + a.readRules(atn) + a.readModes(atn) + + sets := a.readSets(atn, nil) + + a.readEdges(atn, sets) + a.readDecisions(atn) + a.readLexerActions(atn) + a.markPrecedenceDecisions(atn) + a.verifyATN(atn) + + if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser { + a.generateRuleBypassTransitions(atn) + // Re-verify after modification + a.verifyATN(atn) + } + + return atn + +} + +func (a *ATNDeserializer) checkVersion() { + version := a.readInt() + + if version != serializedVersion { + panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").") + } +} + +func (a *ATNDeserializer) readATN() *ATN { + grammarType := a.readInt() + maxTokenType := a.readInt() + + return NewATN(grammarType, maxTokenType) +} + +func (a *ATNDeserializer) readStates(atn *ATN) { + nstates := a.readInt() + + // Allocate worst case size. + loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates) + endStateNumbers := make([]blockStartStateIntPair, 0, nstates) + + // Preallocate states slice. 
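+	// (the serialized ATN records nstates up front, so the exact capacity is
+	// known before any state is read)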
+ atn.states = make([]ATNState, 0, nstates) + + for i := 0; i < nstates; i++ { + stype := a.readInt() + + // Ignore bad types of states + if stype == ATNStateInvalidType { + atn.addState(nil) + continue + } + + ruleIndex := a.readInt() + + s := a.stateFactory(stype, ruleIndex) + + if stype == ATNStateLoopEnd { + loopBackStateNumber := a.readInt() + + loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) + } else if s2, ok := s.(BlockStartState); ok { + endStateNumber := a.readInt() + + endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber}) + } + + atn.addState(s) + } + + // Delay the assignment of loop back and end states until we know all the state + // instances have been initialized + for _, pair := range loopBackStateNumbers { + pair.item0.loopBackState = atn.states[pair.item1] + } + + for _, pair := range endStateNumbers { + pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) + } + + numNonGreedyStates := a.readInt() + for j := 0; j < numNonGreedyStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(DecisionState).setNonGreedy(true) + } + + numPrecedenceStates := a.readInt() + for j := 0; j < numPrecedenceStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true + } +} + +func (a *ATNDeserializer) readRules(atn *ATN) { + nrules := a.readInt() + + if atn.grammarType == ATNTypeLexer { + atn.ruleToTokenType = make([]int, nrules) + } + + atn.ruleToStartState = make([]*RuleStartState, nrules) + + for i := range atn.ruleToStartState { + s := a.readInt() + startState := atn.states[s].(*RuleStartState) + + atn.ruleToStartState[i] = startState + + if atn.grammarType == ATNTypeLexer { + tokenType := a.readInt() + + atn.ruleToTokenType[i] = tokenType + } + } + + atn.ruleToStopState = make([]*RuleStopState, nrules) + + for _, state := range atn.states { + if s2, ok := state.(*RuleStopState); ok { + atn.ruleToStopState[s2.ruleIndex] = s2 + atn.ruleToStartState[s2.ruleIndex].stopState = s2 + } + } +} + +func (a *ATNDeserializer) readModes(atn *ATN) { + nmodes := a.readInt() + atn.modeToStartState = make([]*TokensStartState, nmodes) + + for i := range atn.modeToStartState { + s := a.readInt() + + atn.modeToStartState[i] = atn.states[s].(*TokensStartState) + } +} + +func (a *ATNDeserializer) readSets(_ *ATN, sets []*IntervalSet) []*IntervalSet { + m := a.readInt() + + // Preallocate the needed capacity. 
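+	// (m interval sets are appended in the loop below, one per serialized set)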
+ if cap(sets)-len(sets) < m { + isets := make([]*IntervalSet, len(sets), len(sets)+m) + copy(isets, sets) + sets = isets + } + + for i := 0; i < m; i++ { + iset := NewIntervalSet() + + sets = append(sets, iset) + + n := a.readInt() + containsEOF := a.readInt() + + if containsEOF != 0 { + iset.addOne(-1) + } + + for j := 0; j < n; j++ { + i1 := a.readInt() + i2 := a.readInt() + + iset.addRange(i1, i2) + } + } + + return sets +} + +func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { + nedges := a.readInt() + + for i := 0; i < nedges; i++ { + var ( + src = a.readInt() + trg = a.readInt() + ttype = a.readInt() + arg1 = a.readInt() + arg2 = a.readInt() + arg3 = a.readInt() + trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + ) + + srcState.AddTransition(trans, -1) + } + + // Edges for rule stop states can be derived, so they are not serialized + for _, state := range atn.states { + for _, t := range state.GetTransitions() { + var rt, ok = t.(*RuleTransition) + + if !ok { + continue + } + + outermostPrecedenceReturn := -1 + + if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule { + if rt.precedence == 0 { + outermostPrecedenceReturn = rt.getTarget().GetRuleIndex() + } + } + + trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn) + + atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1) + } + } + + for _, state := range atn.states { + if s2, ok := state.(BlockStartState); ok { + // We need to know the end state to set its start state + if s2.getEndState() == nil { + panic("IllegalState") + } + + // Block end states can only be associated to a single block start state + if s2.getEndState().startState != nil { + panic("IllegalState") + } + + s2.getEndState().startState = state + } + + if s2, ok := state.(*PlusLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*PlusBlockStartState); ok { + t2.loopBackState = state + } + } + } else if s2, ok := state.(*StarLoopbackState); ok { + for _, t := range s2.GetTransitions() { + if t2, ok := t.getTarget().(*StarLoopEntryState); ok { + t2.loopBackState = state + } + } + } + } +} + +func (a *ATNDeserializer) readDecisions(atn *ATN) { + ndecisions := a.readInt() + + for i := 0; i < ndecisions; i++ { + s := a.readInt() + decState := atn.states[s].(DecisionState) + + atn.DecisionToState = append(atn.DecisionToState, decState) + decState.setDecision(i) + } +} + +func (a *ATNDeserializer) readLexerActions(atn *ATN) { + if atn.grammarType == ATNTypeLexer { + count := a.readInt() + + atn.lexerActions = make([]LexerAction, count) + + for i := range atn.lexerActions { + actionType := a.readInt() + data1 := a.readInt() + data2 := a.readInt() + atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2) + } + } +} + +func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { + count := len(atn.ruleToStartState) + + for i := 0; i < count; i++ { + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + } + + for i := 0; i < count; i++ { + a.generateRuleBypassTransition(atn, i) + } +} + +func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { + bypassStart := NewBasicBlockStartState() + + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop := NewBlockEndState() + + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + + atn.defineDecisionState(&bypassStart.BaseDecisionState) + + bypassStop.startState = bypassStart + + 
var excludeTransition Transition
+	var endState ATNState
+
+	if atn.ruleToStartState[idx].isPrecedenceRule {
+		// Wrap from the beginning of the rule to the StarLoopEntryState
+		endState = nil
+
+		for i := 0; i < len(atn.states); i++ {
+			state := atn.states[i]
+
+			if a.stateIsEndStateFor(state, idx) != nil {
+				endState = state
+				excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+				break
+			}
+		}
+
+		if excludeTransition == nil {
+			panic("Couldn't identify final state of the precedence rule prefix section.")
+		}
+	} else {
+		endState = atn.ruleToStopState[idx]
+	}
+
+	// All non-excluded transitions that currently target end state need to target
+	// blockEnd instead
+	for i := 0; i < len(atn.states); i++ {
+		state := atn.states[i]
+
+		for j := 0; j < len(state.GetTransitions()); j++ {
+			transition := state.GetTransitions()[j]
+
+			if transition == excludeTransition {
+				continue
+			}
+
+			if transition.getTarget() == endState {
+				transition.setTarget(bypassStop)
+			}
+		}
+	}
+
+	// All transitions leaving the rule start state need to leave blockStart instead
+	ruleToStartState := atn.ruleToStartState[idx]
+	count := len(ruleToStartState.GetTransitions())
+
+	for count > 0 {
+		// Move the last remaining transition to the bypass start state and
+		// shrink the source list so the loop terminates
+		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
+		count = len(ruleToStartState.GetTransitions())
+	}
+
+	// Link the new states
+	atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+	bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+	MatchState := NewBasicState()
+
+	atn.addState(MatchState)
+	MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+	bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+	if state.GetRuleIndex() != idx {
+		return nil
+	}
+
+	if _, ok := state.(*StarLoopEntryState); !ok {
+		return nil
+	}
+
+	maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+	if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+		return nil
+	}
+
+	var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+	if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+		return state
+	}
+
+	return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+	for _, state := range atn.states {
+		if _, ok := state.(*StarLoopEntryState); !ok {
+			continue
+		}
+
+		// We analyze the [ATN] to determine if an ATN decision state is the
+		// decision for the closure block that determines whether a
+		// precedence rule should continue or complete.
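+		// Such a decision has a LoopEnd transition target whose first
+		// transition, in turn, reaches the rule's stop state (checked below).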
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if s3, ok := maybeLoopEndState.(*LoopEndState); ok { + var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if s3.epsilonOnlyTransitions && ok2 { + state.(*StarLoopEntryState).precedenceRuleDecision = true + } + } + } + } +} + +func (a *ATNDeserializer) verifyATN(atn *ATN) { + if !a.options.VerifyATN() { + return + } + + // Verify assumptions + for _, state := range atn.states { + if state == nil { + continue + } + + a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") + + switch s2 := state.(type) { + case *PlusBlockStartState: + a.checkCondition(s2.loopBackState != nil, "") + + case *StarLoopEntryState: + a.checkCondition(s2.loopBackState != nil, "") + a.checkCondition(len(s2.GetTransitions()) == 2, "") + + switch s2.transitions[0].getTarget().(type) { + case *StarBlockStartState: + _, ok := s2.transitions[1].getTarget().(*LoopEndState) + + a.checkCondition(ok, "") + a.checkCondition(!s2.nonGreedy, "") + + case *LoopEndState: + var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState) + + a.checkCondition(ok, "") + a.checkCondition(s2.nonGreedy, "") + + default: + panic("IllegalState") + } + + case *StarLoopbackState: + a.checkCondition(len(state.GetTransitions()) == 1, "") + + var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) + + a.checkCondition(ok, "") + + case *LoopEndState: + a.checkCondition(s2.loopBackState != nil, "") + + case *RuleStartState: + a.checkCondition(s2.stopState != nil, "") + + case BlockStartState: + a.checkCondition(s2.getEndState() != nil, "") + + case *BlockEndState: + a.checkCondition(s2.startState != nil, "") + + case DecisionState: + a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") + + default: + var _, ok = s2.(*RuleStopState) + + a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") + } + } +} + +func (a *ATNDeserializer) checkCondition(condition bool, message string) { + if !condition { + if message == "" { + message = "IllegalState" + } + + panic(message) + } +} + +func (a *ATNDeserializer) readInt() int { + v := a.data[a.pos] + + a.pos++ + + return int(v) // data is 32 bits but int is at least that big +} + +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, _, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { + target := atn.states[trg] + + switch typeIndex { + case TransitionEPSILON: + return NewEpsilonTransition(target, -1) + + case TransitionRANGE: + if arg3 != 0 { + return NewRangeTransition(target, TokenEOF, arg2) + } + + return NewRangeTransition(target, arg1, arg2) + + case TransitionRULE: + return NewRuleTransition(atn.states[arg1], arg2, arg3, target) + + case TransitionPREDICATE: + return NewPredicateTransition(target, arg1, arg2, arg3 != 0) + + case TransitionPRECEDENCE: + return NewPrecedencePredicateTransition(target, arg1) + + case TransitionATOM: + if arg3 != 0 { + return NewAtomTransition(target, TokenEOF) + } + + return NewAtomTransition(target, arg1) + + case TransitionACTION: + return NewActionTransition(target, arg1, arg2, arg3 != 0) + + case TransitionSET: + return NewSetTransition(target, sets[arg1]) + + case TransitionNOTSET: + return NewNotSetTransition(target, sets[arg1]) + + case TransitionWILDCARD: + return NewWildcardTransition(target) + } + + panic("The specified transition type is not valid.") +} + +func (a 
*ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { + var s ATNState + + switch typeIndex { + case ATNStateInvalidType: + return nil + + case ATNStateBasic: + s = NewBasicState() + + case ATNStateRuleStart: + s = NewRuleStartState() + + case ATNStateBlockStart: + s = NewBasicBlockStartState() + + case ATNStatePlusBlockStart: + s = NewPlusBlockStartState() + + case ATNStateStarBlockStart: + s = NewStarBlockStartState() + + case ATNStateTokenStart: + s = NewTokensStartState() + + case ATNStateRuleStop: + s = NewRuleStopState() + + case ATNStateBlockEnd: + s = NewBlockEndState() + + case ATNStateStarLoopBack: + s = NewStarLoopbackState() + + case ATNStateStarLoopEntry: + s = NewStarLoopEntryState() + + case ATNStatePlusLoopBack: + s = NewPlusLoopbackState() + + case ATNStateLoopEnd: + s = NewLoopEndState() + + default: + panic(fmt.Sprintf("state type %d is invalid", typeIndex)) + } + + s.SetRuleIndex(ruleIndex) + + return s +} + +func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { + switch typeIndex { + case LexerActionTypeChannel: + return NewLexerChannelAction(data1) + + case LexerActionTypeCustom: + return NewLexerCustomAction(data1, data2) + + case LexerActionTypeMode: + return NewLexerModeAction(data1) + + case LexerActionTypeMore: + return LexerMoreActionINSTANCE + + case LexerActionTypePopMode: + return LexerPopModeActionINSTANCE + + case LexerActionTypePushMode: + return NewLexerPushModeAction(data1) + + case LexerActionTypeSkip: + return LexerSkipActionINSTANCE + + case LexerActionTypeType: + return NewLexerTypeAction(data1) + + default: + panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go new file mode 100644 index 00000000..afe6c9f8 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_simulator.go @@ -0,0 +1,41 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func (b *BaseATNSimulator) getCachedContext(context *PredictionContext) *PredictionContext { + if b.sharedContextCache == nil { + return context + } + + //visited := NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionVisitedCollection, "Visit map in getCachedContext()") + visited := NewVisitRecord() + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_state.go b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go new file mode 100644 index 00000000..2ae5807c --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_state.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+)
+
+// Constants for serialization.
+const (
+	ATNStateInvalidType    = 0
+	ATNStateBasic          = 1
+	ATNStateRuleStart      = 2
+	ATNStateBlockStart     = 3
+	ATNStatePlusBlockStart = 4
+	ATNStateStarBlockStart = 5
+	ATNStateTokenStart     = 6
+	ATNStateRuleStop       = 7
+	ATNStateBlockEnd       = 8
+	ATNStateStarLoopBack   = 9
+	ATNStateStarLoopEntry  = 10
+	ATNStatePlusLoopBack   = 11
+	ATNStateLoopEnd        = 12
+
+	ATNStateInvalidStateNumber = -1
+)
+
+//goland:noinspection GoUnusedGlobalVariable
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+	GetEpsilonOnlyTransitions() bool
+
+	GetRuleIndex() int
+	SetRuleIndex(int)
+
+	GetNextTokenWithinRule() *IntervalSet
+	SetNextTokenWithinRule(*IntervalSet)
+
+	GetATN() *ATN
+	SetATN(*ATN)
+
+	GetStateType() int
+
+	GetStateNumber() int
+	SetStateNumber(int)
+
+	GetTransitions() []Transition
+	SetTransitions([]Transition)
+	AddTransition(Transition, int)
+
+	String() string
+	Hash() int
+	Equals(Collectable[ATNState]) bool
+}
+
+type BaseATNState struct {
+	// NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+	NextTokenWithinRule *IntervalSet
+
+	// atn is the current ATN.
+	atn *ATN
+
+	epsilonOnlyTransitions bool
+
+	// ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+	ruleIndex int
+
+	stateNumber int
+
+	stateType int
+
+	// Track the transitions emanating from this ATN state.
+	transitions []Transition
+}
+
+func NewATNState() *BaseATNState {
+	return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+	return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+	as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+	return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+	return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+	as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+	return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+	as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+	return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+	return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+	as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+	return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+	as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) Hash() int {
+	return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+	return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) Equals(other Collectable[ATNState]) bool {
+	if ot, ok := other.(ATNState); ok {
+		return as.stateNumber == ot.GetStateNumber()
+	}
+
+	return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+	return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+	if len(as.transitions) == 0 {
+		as.epsilonOnlyTransitions = trans.getIsEpsilon()
+	} else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+		// Diagnostics belong on stderr, not stdin
+		_, _ = fmt.Fprintf(os.Stderr, "ATN state %d has both epsilon and non-epsilon transitions.\n", as.stateNumber)
+		as.epsilonOnlyTransitions = false
+	}
+
+	// TODO: Check code for already present
compared to the Java equivalent + //alreadyPresent := false + //for _, t := range as.transitions { + // if t.getTarget().GetStateNumber() == trans.getTarget().GetStateNumber() { + // if t.getLabel() != nil && trans.getLabel() != nil && trans.getLabel().Equals(t.getLabel()) { + // alreadyPresent = true + // break + // } + // } else if t.getIsEpsilon() && trans.getIsEpsilon() { + // alreadyPresent = true + // break + // } + //} + //if !alreadyPresent { + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } + //} else { + // _, _ = fmt.Fprintf(os.Stderr, "Transition already present in state %d\n", as.stateNumber) + //} +} + +type BasicState struct { + BaseATNState +} + +func NewBasicState() *BasicState { + return &BasicState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + } +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + } +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. +type BaseBlockStartState struct { + BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBasic, + }, + decision: -1, + }, + } +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + return &BasicBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &BasicBlockStartState{} + +// BlockEndState is a terminal node of a simple (a|b|c) block. +type BlockEndState struct { + BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + return &BlockEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateBlockEnd, + }, + startState: nil, + } +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. 
+type RuleStopState struct { + BaseATNState +} + +func NewRuleStopState() *RuleStopState { + return &RuleStopState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStop, + }, + } +} + +type RuleStartState struct { + BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + return &RuleStartState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateRuleStart, + }, + } +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + return &PlusLoopbackState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusLoopBack, + }, + }, + } +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + return &PlusBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStatePlusBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &PlusBlockStartState{} + +// StarBlockStartState is the block that begins a closure loop. +type StarBlockStartState struct { + BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + return &StarBlockStartState{ + BaseBlockStartState: BaseBlockStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarBlockStart, + }, + }, + }, + } +} + +var _ BlockStartState = &StarBlockStartState{} + +type StarLoopbackState struct { + BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + return &StarLoopbackState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopBack, + }, + } +} + +type StarLoopEntryState struct { + BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateStarLoopEntry, + }, + }, + } +} + +// LoopEndState marks the end of a * or + loop. +type LoopEndState struct { + BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + return &LoopEndState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateLoopEnd, + }, + } +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. 
+type TokensStartState struct { + BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + return &TokensStartState{ + BaseDecisionState: BaseDecisionState{ + BaseATNState: BaseATNState{ + stateNumber: ATNStateInvalidStateNumber, + stateType: ATNStateTokenStart, + }, + }, + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/atn_type.go b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go new file mode 100644 index 00000000..3a515a14 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/atn_type.go @@ -0,0 +1,11 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represent the type of recognizer an ATN applies to. +const ( + ATNTypeLexer = 0 + ATNTypeParser = 1 +) diff --git a/vendor/github.com/antlr4-go/antlr/v4/char_stream.go b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go new file mode 100644 index 00000000..bd8127b6 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/char_stream.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type CharStream interface { + IntStream + GetText(int, int) string + GetTextFromTokens(start, end Token) string + GetTextFromInterval(Interval) string +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go new file mode 100644 index 00000000..1bb0314e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_factory.go @@ -0,0 +1,56 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// TokenFactory creates CommonToken objects. +type TokenFactory interface { + Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token +} + +// CommonTokenFactory is the default TokenFactory implementation. +type CommonTokenFactory struct { + // copyText indicates whether CommonToken.setText should be called after + // constructing tokens to explicitly set the text. This is useful for cases + // where the input stream might not be able to provide arbitrary substrings of + // text from the input after the lexer creates a token (e.g. the + // implementation of CharStream.GetText in UnbufferedCharStream panics an + // UnsupportedOperationException). Explicitly setting the token text allows + // Token.GetText to be called at any time regardless of the input stream + // implementation. + // + // The default value is false to avoid the performance and memory overhead of + // copying text for every token unless explicitly requested. + copyText bool +} + +func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { + return &CommonTokenFactory{copyText: copyText} +} + +// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not +// explicitly copy token text when constructing tokens. 
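+//
+// Illustrative only: a lexer whose input stream cannot reproduce substrings
+// after tokenization could instead use a copying factory:
+//
+//	factory := NewCommonTokenFactory(true)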
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+	t := NewCommonToken(source, ttype, channel, start, stop)
+
+	t.line = line
+	t.column = column
+
+	if text != "" {
+		t.SetText(text)
+	} else if c.copyText && source.charStream != nil {
+		t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+	}
+
+	return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+	t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+	t.SetText(text)
+
+	return t
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
new file mode 100644
index 00000000..b75da9df
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/common_token_stream.go
@@ -0,0 +1,450 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens
+// to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+	channel int
+
+	// fetchedEOF indicates whether the Token.EOF token has been fetched from
+	// tokenSource and added to tokens. This field improves performance for the
+	// following cases:
+	//
+	// consume: The lookahead check in consume to prevent consuming the EOF symbol is
+	// optimized by checking the values of fetchedEOF and p instead of calling LA.
+	//
+	// fetch: The check to prevent adding multiple EOF symbols into tokens is
+	// trivial with this field.
+	fetchedEOF bool
+
+	// index into [tokens] of the current token (next token to consume).
+	// tokens[p] should be LT(1). It is set to -1 when the stream is first
+	// constructed or when SetTokenSource is called, indicating that the first token
+	// has not yet been fetched from the token source. For additional information,
+	// see the documentation of [IntStream] for a description of initializing methods.
+	index int
+
+	// tokenSource is the [TokenSource] from which tokens for this stream are
+	// fetched.
+	tokenSource TokenSource
+
+	// tokens contains all tokens fetched from the token source. The list is considered a
+	// complete view of the input once fetchedEOF is set to true.
+	tokens []Token
+}
+
+// NewCommonTokenStream creates a new CommonTokenStream instance that uses the supplied lexer to produce
+// tokens and pulls tokens from the given lexer channel.
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+	return &CommonTokenStream{
+		channel:     channel,
+		index:       -1,
+		tokenSource: lexer,
+		tokens:      make([]Token, 0),
+	}
+}
+
+// GetAllTokens returns all tokens currently pulled from the token source.
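+//
+// Illustrative sketch (not upstream documentation; assumes lexer is an
+// initialized Lexer, and uses Fill, defined later in this file, to pull all
+// tokens first):
+//
+//	stream := NewCommonTokenStream(lexer, TokenDefaultChannel)
+//	stream.Fill()
+//	tokens := stream.GetAllTokens()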
+func (c *CommonTokenStream) GetAllTokens() []Token { + return c.tokens +} + +func (c *CommonTokenStream) Mark() int { + return 0 +} + +func (c *CommonTokenStream) Release(_ int) {} + +func (c *CommonTokenStream) Reset() { + c.fetchedEOF = false + c.tokens = make([]Token, 0) + c.Seek(0) +} + +func (c *CommonTokenStream) Seek(index int) { + c.lazyInit() + c.index = c.adjustSeekIndex(index) +} + +func (c *CommonTokenStream) Get(index int) Token { + c.lazyInit() + + return c.tokens[index] +} + +func (c *CommonTokenStream) Consume() { + SkipEOFCheck := false + + if c.index >= 0 { + if c.fetchedEOF { + // The last token in tokens is EOF. Skip the check if p indexes any fetched. + // token except the last. + SkipEOFCheck = c.index < len(c.tokens)-1 + } else { + // No EOF token in tokens. Skip the check if p indexes a fetched token. + SkipEOFCheck = c.index < len(c.tokens) + } + } else { + // Not yet initialized + SkipEOFCheck = false + } + + if !SkipEOFCheck && c.LA(1) == TokenEOF { + panic("cannot consume EOF") + } + + if c.Sync(c.index + 1) { + c.index = c.adjustSeekIndex(c.index + 1) + } +} + +// Sync makes sure index i in tokens has a token and returns true if a token is +// located at index i and otherwise false. +func (c *CommonTokenStream) Sync(i int) bool { + n := i - len(c.tokens) + 1 // How many more elements do we need? + + if n > 0 { + fetched := c.fetch(n) + return fetched >= n + } + + return true +} + +// fetch adds n elements to buffer and returns the actual number of elements +// added to the buffer. +func (c *CommonTokenStream) fetch(n int) int { + if c.fetchedEOF { + return 0 + } + + for i := 0; i < n; i++ { + t := c.tokenSource.NextToken() + + t.SetTokenIndex(len(c.tokens)) + c.tokens = append(c.tokens, t) + + if t.GetTokenType() == TokenEOF { + c.fetchedEOF = true + + return i + 1 + } + } + + return n +} + +// GetTokens gets all tokens from start to stop inclusive. +func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { + if start < 0 || stop < 0 { + return nil + } + + c.lazyInit() + + subset := make([]Token, 0) + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + for i := start; i < stop; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + if types == nil || types.contains(t.GetTokenType()) { + subset = append(subset, t) + } + } + + return subset +} + +func (c *CommonTokenStream) LA(i int) int { + return c.LT(i).GetTokenType() +} + +func (c *CommonTokenStream) lazyInit() { + if c.index == -1 { + c.setup() + } +} + +func (c *CommonTokenStream) setup() { + c.Sync(0) + c.index = c.adjustSeekIndex(0) +} + +func (c *CommonTokenStream) GetTokenSource() TokenSource { + return c.tokenSource +} + +// SetTokenSource resets the c token stream by setting its token source. +func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { + c.tokenSource = tokenSource + c.tokens = make([]Token, 0) + c.index = -1 + c.fetchedEOF = false +} + +// NextTokenOnChannel returns the index of the next token on channel given a +// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are +// no tokens on channel between 'i' and [TokenEOF]. 
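The buffering behaviour above is easiest to see end to end. A minimal usage sketch follows; MyLexer is a hypothetical ANTLR-generated lexer (not part of this patch), while the stream calls are the ones defined in this file.

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

func main() {
	input := antlr.NewInputStream("a + b")
	lex := NewMyLexer(input) // hypothetical generated constructor
	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)

	tokens.Fill() // pull every token from the lexer, up to and including EOF
	for _, t := range tokens.GetAllTokens() {
		fmt.Printf("%d: %q\n", t.GetTokenIndex(), t.GetText())
	}
}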
+func (c *CommonTokenStream) NextTokenOnChannel(i, _ int) int { + c.Sync(i) + + if i >= len(c.tokens) { + return -1 + } + + token := c.tokens[i] + + for token.GetChannel() != c.channel { + if token.GetTokenType() == TokenEOF { + return -1 + } + + i++ + c.Sync(i) + token = c.tokens[i] + } + + return i +} + +// previousTokenOnChannel returns the index of the previous token on channel +// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if +// there are no tokens on channel between i and 0. +func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { + for i >= 0 && c.tokens[i].GetChannel() != channel { + i-- + } + + return i +} + +// GetHiddenTokensToRight collects all tokens on a specified channel to the +// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL +// or EOF. If channel is -1, it finds any non-default channel token. +func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) + } + + nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) + from := tokenIndex + 1 + + // If no onChannel to the right, then nextOnChannel == -1, so set 'to' to the last token + var to int + + if nextOnChannel == -1 { + to = len(c.tokens) - 1 + } else { + to = nextOnChannel + } + + return c.filterForChannel(from, to, channel) +} + +// GetHiddenTokensToLeft collects all tokens on channel to the left of the +// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is +// -1, it finds any non default channel token. +func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) + } + + prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) + + if prevOnChannel == tokenIndex-1 { + return nil + } + + // If there are none on channel to the left and prevOnChannel == -1 then from = 0 + from := prevOnChannel + 1 + to := tokenIndex - 1 + + return c.filterForChannel(from, to, channel) +} + +func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { + hidden := make([]Token, 0) + + for i := left; i < right+1; i++ { + t := c.tokens[i] + + if channel == -1 { + if t.GetChannel() != LexerDefaultTokenChannel { + hidden = append(hidden, t) + } + } else if t.GetChannel() == channel { + hidden = append(hidden, t) + } + } + + if len(hidden) == 0 { + return nil + } + + return hidden +} + +func (c *CommonTokenStream) GetSourceName() string { + return c.tokenSource.GetSourceName() +} + +func (c *CommonTokenStream) Size() int { + return len(c.tokens) +} + +func (c *CommonTokenStream) Index() int { + return c.index +} + +func (c *CommonTokenStream) GetAllText() string { + c.Fill() + return c.GetTextFromInterval(NewInterval(0, len(c.tokens)-1)) +} + +func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { + if start == nil || end == nil { + return "" + } + + return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) +} + +func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { + return c.GetTextFromInterval(interval.GetSourceInterval()) +} + +func (c *CommonTokenStream) GetTextFromInterval(interval Interval) string { + c.lazyInit() + c.Sync(interval.Stop) + + start := interval.Start + stop := interval.Stop + + if start < 0 || stop < 0 { + return "" + } + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + s := "" + + for i := start; i < stop+1; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + s += t.GetText() + } + + return s +} + +// Fill gets all tokens from the lexer until EOF. +func (c *CommonTokenStream) Fill() { + c.lazyInit() + + for c.fetch(1000) == 1000 { + continue + } +} + +func (c *CommonTokenStream) adjustSeekIndex(i int) int { + return c.NextTokenOnChannel(i, c.channel) +} + +func (c *CommonTokenStream) LB(k int) Token { + if k == 0 || c.index-k < 0 { + return nil + } + + i := c.index + n := 1 + + // Find k good tokens looking backward + for n <= k { + // Skip off-channel tokens + i = c.previousTokenOnChannel(i-1, c.channel) + n++ + } + + if i < 0 { + return nil + } + + return c.tokens[i] +} + +func (c *CommonTokenStream) LT(k int) Token { + c.lazyInit() + + if k == 0 { + return nil + } + + if k < 0 { + return c.LB(-k) + } + + i := c.index + n := 1 // We know tokens[n] is valid + + // Find k good tokens + for n < k { + // Skip off-channel tokens, but make sure to not look past EOF + if c.Sync(i + 1) { + i = c.NextTokenOnChannel(i+1, c.channel) + } + + n++ + } + + return c.tokens[i] +} + +// getNumberOfOnChannelTokens counts EOF once. 
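GetHiddenTokensToLeft and GetHiddenTokensToRight above are the usual way to recover comments and whitespace that a grammar parks on a non-default channel. A short sketch, assuming the lexer routes such trivia to TokenHiddenChannel; hiddenAround is illustrative only.

// hiddenAround returns the hidden-channel tokens immediately surrounding
// token i. Both calls panic if i is outside the buffered token range, so
// callers should Fill() or Sync() first.
func hiddenAround(tokens *antlr.CommonTokenStream, i int) []antlr.Token {
	left := tokens.GetHiddenTokensToLeft(i, antlr.TokenHiddenChannel)
	right := tokens.GetHiddenTokensToRight(i, antlr.TokenHiddenChannel)
	return append(left, right...) // either slice may be nil; append handles that
}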
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { + var n int + + c.Fill() + + for i := 0; i < len(c.tokens); i++ { + t := c.tokens[i] + + if t.GetChannel() == c.channel { + n++ + } + + if t.GetTokenType() == TokenEOF { + break + } + } + + return n +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/comparators.go b/vendor/github.com/antlr4-go/antlr/v4/comparators.go new file mode 100644 index 00000000..7467e9b4 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/comparators.go @@ -0,0 +1,150 @@ +package antlr + +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +// This file contains all the implementations of custom comparators used for generic collections when the +// Hash() and Equals() funcs supplied by the struct objects themselves need to be overridden. Normally, we would +// put the comparators in the source file for the struct themselves, but given the organization of this code is +// sorta kinda based upon the Java code, I found it confusing trying to find out which comparator was where and used by +// which instantiation of a collection. For instance, an Array2DHashSet in the Java source, when used with ATNConfig +// collections requires three different comparators depending on what the collection is being used for. Collecting - pun intended - +// all the comparators here, makes it much easier to see which implementation of hash and equals is used by which collection. +// It also makes it easy to verify that the Hash() and Equals() functions marry up with the Java implementations. + +// ObjEqComparator is the equivalent of the Java ObjectEqualityComparator, which is the default instance of +// Equality comparator. We do not have inheritance in Go, only interfaces, so we use generics to enforce some +// type safety and avoid having to implement this for every type that we want to perform comparison on. +// +// This comparator works by using the standard Hash() and Equals() methods of the type T that is being compared. Which +// allows us to use it in any collection instance that does not require a special hash or equals implementation. +type ObjEqComparator[T Collectable[T]] struct{} + +var ( + aStateEqInst = &ObjEqComparator[ATNState]{} + aConfEqInst = &ObjEqComparator[*ATNConfig]{} + + // aConfCompInst is the comparator used for the ATNConfigSet for the configLookup cache + aConfCompInst = &ATNConfigComparator[*ATNConfig]{} + atnConfCompInst = &BaseATNConfigComparator[*ATNConfig]{} + dfaStateEqInst = &ObjEqComparator[*DFAState]{} + semctxEqInst = &ObjEqComparator[SemanticContext]{} + atnAltCfgEqInst = &ATNAltConfigComparator[*ATNConfig]{} + pContextEqInst = &ObjEqComparator[*PredictionContext]{} +) + +// Equals2 delegates to the Equals() method of type T +func (c *ObjEqComparator[T]) Equals2(o1, o2 T) bool { + return o1.Equals(o2) +} + +// Hash1 delegates to the Hash() method of type T +func (c *ObjEqComparator[T]) Hash1(o T) int { + + return o.Hash() +} + +type SemCComparator[T Collectable[T]] struct{} + +// ATNConfigComparator is used as the comparator for the configLookup field of an ATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. 
+type ATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNConfigComparator[T]) Hash1(o *ATNConfig) int { + + hash := 7 + hash = 31*hash + o.GetState().GetStateNumber() + hash = 31*hash + o.GetAlt() + hash = 31*hash + o.GetSemanticContext().Hash() + return hash +} + +// ATNAltConfigComparator is used as the comparator for mapping configs to Alt Bitsets +type ATNAltConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetContext().Equals(o2.GetContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup +func (c *ATNAltConfigComparator[T]) Hash1(o *ATNConfig) int { + h := murmurInit(7) + h = murmurUpdate(h, o.GetState().GetStateNumber()) + h = murmurUpdate(h, o.GetContext().Hash()) + return murmurFinish(h, 2) +} + +// BaseATNConfigComparator is used as the comparator for the configLookup field of a ATNConfigSet +// and has a custom Equals() and Hash() implementation, because equality is not based on the +// standard Hash() and Equals() methods of the ATNConfig type. +type BaseATNConfigComparator[T Collectable[T]] struct { +} + +// Equals2 is a custom comparator for ATNConfigs specifically for baseATNConfigSet +func (c *BaseATNConfigComparator[T]) Equals2(o1, o2 *ATNConfig) bool { + + // Same pointer, must be equal, even if both nil + // + if o1 == o2 { + return true + + } + + // If either are nil, but not both, then the result is false + // + if o1 == nil || o2 == nil { + return false + } + + return o1.GetState().GetStateNumber() == o2.GetState().GetStateNumber() && + o1.GetAlt() == o2.GetAlt() && + o1.GetSemanticContext().Equals(o2.GetSemanticContext()) +} + +// Hash1 is custom hash implementation for ATNConfigs specifically for configLookup, but in fact just +// delegates to the standard Hash() method of the ATNConfig type. 
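The comparator shape is generic and reusable. A hypothetical example for a user-defined type, mirroring the Hash1/Equals2 pair above (point and pointEqComparator are illustrative, not part of the runtime):

type point struct{ x, y int }

func (p *point) Hash() int            { return p.x*31 + p.y }
func (p *point) Equals(o *point) bool { return p.x == o.x && p.y == o.y }

// pointEqComparator adapts point's Hash/Equals to the comparator methods
// that the runtime's generic collections call.
type pointEqComparator struct{}

func (pointEqComparator) Hash1(p *point) int       { return p.Hash() }
func (pointEqComparator) Equals2(a, b *point) bool { return a.Equals(b) }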
+func (c *BaseATNConfigComparator[T]) Hash1(o *ATNConfig) int { + return o.Hash() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/configuration.go b/vendor/github.com/antlr4-go/antlr/v4/configuration.go new file mode 100644 index 00000000..c2b72451 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/configuration.go @@ -0,0 +1,214 @@ +package antlr + +type runtimeConfiguration struct { + statsTraceStacks bool + lexerATNSimulatorDebug bool + lexerATNSimulatorDFADebug bool + parserATNSimulatorDebug bool + parserATNSimulatorTraceATNSim bool + parserATNSimulatorDFADebug bool + parserATNSimulatorRetryDebug bool + lRLoopEntryBranchOpt bool + memoryManager bool +} + +// Global runtime configuration +var runtimeConfig = runtimeConfiguration{ + lRLoopEntryBranchOpt: true, +} + +type runtimeOption func(*runtimeConfiguration) error + +// ConfigureRuntime allows the runtime to be configured globally setting things like trace and statistics options. +// It uses the functional options pattern for go. This is a package global function as it operates on the runtime +// configuration regardless of the instantiation of anything higher up such as a parser or lexer. Generally this is +// used for debugging/tracing/statistics options, which are usually used by the runtime maintainers (or rather the +// only maintainer). However, it is possible that you might want to use this to set a global option concerning the +// memory allocation type used by the runtime such as sync.Pool or not. +// +// The options are applied in the order they are passed in, so the last option will override any previous options. +// +// For example, if you want to turn on the collection create point stack flag to true, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// If you want to turn it off, you can do: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func ConfigureRuntime(options ...runtimeOption) error { + for _, option := range options { + err := option(&runtimeConfig) + if err != nil { + return err + } + } + return nil +} + +// WithStatsTraceStacks sets the global flag indicating whether to collect stack traces at the create-point of +// certain structs, such as collections, or the use point of certain methods such as Put(). +// Because this can be expensive, it is turned off by default. However, it +// can be useful to track down exactly where memory is being created and used. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithStatsTraceStacks(false)) +func WithStatsTraceStacks(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.statsTraceStacks = trace + return nil + } +} + +// WithLexerATNSimulatorDebug sets the global flag indicating whether to log debug information from the lexer [ATN] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDebug(false)) +func WithLexerATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDebug = debug + return nil + } +} + +// WithLexerATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the lexer [ATN] [DFA] +// simulator. This is useful for debugging lexer issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLexerATNSimulatorDFADebug(false)) +func WithLexerATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lexerATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorDebug sets the global flag indicating whether to log debug information from the parser [ATN] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDebug(false)) +func WithParserATNSimulatorDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDebug = debug + return nil + } +} + +// WithParserATNSimulatorTraceATNSim sets the global flag indicating whether to log trace information from the parser [ATN] simulator +// [DFA]. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false)) +func WithParserATNSimulatorTraceATNSim(trace bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorTraceATNSim = trace + return nil + } +} + +// WithParserATNSimulatorDFADebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator. This is useful for debugging parser issues by comparing the output with the Java runtime. Only useful +// to the runtime maintainers. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorDFADebug(false)) +func WithParserATNSimulatorDFADebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorDFADebug = debug + return nil + } +} + +// WithParserATNSimulatorRetryDebug sets the global flag indicating whether to log debug information from the parser [ATN] [DFA] +// simulator when retrying a decision. This is useful for debugging parser issues by comparing the output with the Java runtime. +// Only useful to the runtime maintainers. 
+// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithParserATNSimulatorRetryDebug(false)) +func WithParserATNSimulatorRetryDebug(debug bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.parserATNSimulatorRetryDebug = debug + return nil + } +} + +// WithLRLoopEntryBranchOpt sets the global flag indicating whether let recursive loop operations should be +// optimized or not. This is useful for debugging parser issues by comparing the output with the Java runtime. +// It turns off the functionality of [canDropLoopEntryEdgeInLeftRecursiveRule] in [ParserATNSimulator]. +// +// Note that default is to use this optimization. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(true)) +// +// You can turn it off at any time using: +// +// antlr.ConfigureRuntime(antlr.WithLRLoopEntryBranchOpt(false)) +func WithLRLoopEntryBranchOpt(off bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.lRLoopEntryBranchOpt = off + return nil + } +} + +// WithMemoryManager sets the global flag indicating whether to use the memory manager or not. This is useful +// for poorly constructed grammars that create a lot of garbage. It turns on the functionality of [memoryManager], which +// will intercept garbage collection and cause available memory to be reused. At the end of the day, this is no substitute +// for fixing your grammar by ridding yourself of extreme ambiguity. BUt if you are just trying to reuse an opensource +// grammar, this may help make it more practical. +// +// Note that default is to use normal Go memory allocation and not pool memory. +// +// Use: +// +// antlr.ConfigureRuntime(antlr.WithMemoryManager(true)) +// +// Note that if you turn this on, you should probably leave it on. You should use only one memory strategy or the other +// and should remember to nil out any references to the parser or lexer when you are done with them. +func WithMemoryManager(use bool) runtimeOption { + return func(config *runtimeConfiguration) error { + config.memoryManager = use + return nil + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa.go b/vendor/github.com/antlr4-go/antlr/v4/dfa.go new file mode 100644 index 00000000..6b63eb15 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa.go @@ -0,0 +1,175 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// DFA represents the Deterministic Finite Automaton used by the recognizer, including all the states it can +// reach and the transitions between them. +type DFA struct { + // atnStartState is the ATN state in which this was created + atnStartState DecisionState + + decision int + + // states is all the DFA states. Use Map to get the old state back; Set can only + // indicate whether it is there. Go maps implement key hash collisions and so on and are very + // good, but the DFAState is an object and can't be used directly as the key as it can in say Java + // amd C#, whereby if the hashcode is the same for two objects, then Equals() is called against them + // to see if they really are the same object. Hence, we have our own map storage. 
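Because ConfigureRuntime mutates a package-global configuration, a typical debugging session brackets the parse with an enable and a disable. A sketch using the options defined above (the log import is assumed):

if err := antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(true)); err != nil {
	log.Fatal(err)
}
// ... construct the parser and parse the problem input while tracing ...
_ = antlr.ConfigureRuntime(antlr.WithParserATNSimulatorTraceATNSim(false))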
+ // + states *JStore[*DFAState, *ObjEqComparator[*DFAState]] + + numstates int + + s0 *DFAState + + // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. + // True if the DFA is for a precedence decision and false otherwise. + precedenceDfa bool +} + +func NewDFA(atnStartState DecisionState, decision int) *DFA { + dfa := &DFA{ + atnStartState: atnStartState, + decision: decision, + states: nil, // Lazy initialize + } + if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision { + dfa.precedenceDfa = true + dfa.s0 = NewDFAState(-1, NewATNConfigSet(false)) + dfa.s0.isAcceptState = false + dfa.s0.requiresFullContext = false + } + return dfa +} + +// getPrecedenceStartState gets the start state for the current precedence and +// returns the start state corresponding to the specified precedence if a start +// state exists for the specified precedence and nil otherwise. d must be a +// precedence DFA. See also isPrecedenceDfa. +func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + // s0.edges is never nil for a precedence DFA + if precedence < 0 || precedence >= len(d.getS0().getEdges()) { + return nil + } + + return d.getS0().getIthEdge(precedence) +} + +// setPrecedenceStartState sets the start state for the current precedence. d +// must be a precedence DFA. See also isPrecedenceDfa. +func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { + if !d.getPrecedenceDfa() { + panic("only precedence DFAs may contain a precedence start state") + } + + if precedence < 0 { + return + } + + // Synchronization on s0 here is ok. When the DFA is turned into a + // precedence DFA, s0 will be initialized once and not updated again. s0.edges + // is never nil for a precedence DFA. + s0 := d.getS0() + if precedence >= s0.numEdges() { + edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...) + s0.setEdges(edges) + d.setS0(s0) + } + + s0.setIthEdge(precedence, startState) +} + +func (d *DFA) getPrecedenceDfa() bool { + return d.precedenceDfa +} + +// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs +// from the current DFA configuration, then d.states is cleared, the initial +// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to +// store the start states for individual precedence values if precedenceDfa is +// true or nil otherwise, and d.precedenceDfa is updated. +func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { + if d.getPrecedenceDfa() != precedenceDfa { + d.states = nil // Lazy initialize + d.numstates = 0 + + if precedenceDfa { + precedenceState := NewDFAState(-1, NewATNConfigSet(false)) + precedenceState.setEdges(make([]*DFAState, 0)) + precedenceState.isAcceptState = false + precedenceState.requiresFullContext = false + d.setS0(precedenceState) + } else { + d.setS0(nil) + } + + d.precedenceDfa = precedenceDfa + } +} + +// Len returns the number of states in d. We use this instead of accessing states directly so that we can implement lazy +// instantiation of the states JMap. +func (d *DFA) Len() int { + if d.states == nil { + return 0 + } + return d.states.Len() +} + +// Get returns a state that matches s if it is present in the DFA state set. We defer to this +// function instead of accessing states directly so that we can implement lazy instantiation of the states JMap. 
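The nil-guarded accessors here implement a lazy-allocation idiom: the states store costs nothing until the first Put. Reduced to a standalone sketch (lazySet is illustrative, not part of the runtime):

type lazySet struct {
	m map[int]struct{}
}

func (s *lazySet) put(k int) {
	if s.m == nil {
		s.m = make(map[int]struct{}) // first use pays the allocation
	}
	s.m[k] = struct{}{}
}

func (s *lazySet) size() int {
	if s.m == nil { // mirror DFA.Len's nil guard
		return 0
	}
	return len(s.m)
}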
+func (d *DFA) Get(s *DFAState) (*DFAState, bool) { + if d.states == nil { + return nil, false + } + return d.states.Get(s) +} + +func (d *DFA) Put(s *DFAState) (*DFAState, bool) { + if d.states == nil { + d.states = NewJStore[*DFAState, *ObjEqComparator[*DFAState]](dfaStateEqInst, DFAStateCollection, "DFA via DFA.Put") + } + return d.states.Put(s) +} + +func (d *DFA) getS0() *DFAState { + return d.s0 +} + +func (d *DFA) setS0(s *DFAState) { + d.s0 = s +} + +// sortedStates returns the states in d sorted by their state number, or an empty set if d.states is nil. +func (d *DFA) sortedStates() []*DFAState { + if d.states == nil { + return []*DFAState{} + } + vs := d.states.SortedSlice(func(i, j *DFAState) bool { + return i.stateNumber < j.stateNumber + }) + + return vs +} + +func (d *DFA) String(literalNames []string, symbolicNames []string) string { + if d.getS0() == nil { + return "" + } + + return NewDFASerializer(d, literalNames, symbolicNames).String() +} + +func (d *DFA) ToLexerString() string { + if d.getS0() == nil { + return "" + } + + return NewLexerDFASerializer(d).String() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go new file mode 100644 index 00000000..0e110098 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_serializer.go @@ -0,0 +1,158 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// DFASerializer is a DFA walker that knows how to dump the DFA states to serialized +// strings. +type DFASerializer struct { + dfa *DFA + literalNames []string + symbolicNames []string +} + +func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { + if literalNames == nil { + literalNames = make([]string, 0) + } + + if symbolicNames == nil { + symbolicNames = make([]string, 0) + } + + return &DFASerializer{ + dfa: dfa, + literalNames: literalNames, + symbolicNames: symbolicNames, + } +} + +func (d *DFASerializer) String() string { + if d.dfa.getS0() == nil { + return "" + } + + buf := "" + states := d.dfa.sortedStates() + + for _, s := range states { + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += d.GetStateString(s) + buf += "-" + buf += d.getEdgeLabel(j) + buf += "->" + buf += d.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} + +func (d *DFASerializer) getEdgeLabel(i int) string { + if i == 0 { + return "EOF" + } else if d.literalNames != nil && i-1 < len(d.literalNames) { + return d.literalNames[i-1] + } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { + return d.symbolicNames[i-1] + } + + return strconv.Itoa(i - 1) +} + +func (d *DFASerializer) GetStateString(s *DFAState) string { + var a, b string + + if s.isAcceptState { + a = ":" + } + + if s.requiresFullContext { + b = "^" + } + + baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b + + if s.isAcceptState { + if s.predicates != nil { + return baseStateStr + "=>" + fmt.Sprint(s.predicates) + } + + return baseStateStr + "=>" + fmt.Sprint(s.prediction) + } + + return baseStateStr +} + +type LexerDFASerializer struct { + *DFASerializer +} + +func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { + return 
&LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} +} + +func (l *LexerDFASerializer) getEdgeLabel(i int) string { + var sb strings.Builder + sb.Grow(6) + sb.WriteByte('\'') + sb.WriteRune(rune(i)) + sb.WriteByte('\'') + return sb.String() +} + +func (l *LexerDFASerializer) String() string { + if l.dfa.getS0() == nil { + return "" + } + + buf := "" + states := l.dfa.sortedStates() + + for i := 0; i < len(states); i++ { + s := states[i] + + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += l.GetStateString(s) + buf += "-" + buf += l.getEdgeLabel(j) + buf += "->" + buf += l.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go new file mode 100644 index 00000000..65414307 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/dfa_state.go @@ -0,0 +1,170 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// PredPrediction maps a predicate to a predicted alternative. +type PredPrediction struct { + alt int + pred SemanticContext +} + +func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { + return &PredPrediction{alt: alt, pred: pred} +} + +func (p *PredPrediction) String() string { + return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" +} + +// DFAState represents a set of possible [ATN] configurations. As Aho, Sethi, +// Ullman p. 117 says: "The DFA uses its state to keep track of all possible +// states the ATN can be in after reading each input symbol. That is to say, +// after reading input a1, a2,..an, the DFA is in a state that represents the +// subset T of the states of the ATN that are reachable from the ATN's start +// state along some path labeled a1a2..an." +// +// In conventional NFA-to-DFA conversion, therefore, the subset T would be a bitset representing the set of +// states the [ATN] could be in. We need to track the alt predicted by each state +// as well, however. More importantly, we need to maintain a stack of states, +// tracking the closure operations as they jump from rule to rule, emulating +// rule invocations (method calls). I have to add a stack to simulate the proper +// lookahead sequences for the underlying LL grammar from which the ATN was +// derived. +// +// I use a set of [ATNConfig] objects, not simple states. An [ATNConfig] is both a +// state (ala normal conversion) and a [RuleContext] describing the chain of rules +// (if any) followed to arrive at that state. +// +// A [DFAState] may have multiple references to a particular state, but with +// different [ATN] contexts (with same or different alts) meaning that state was +// reached via a different set of rule invocations. +type DFAState struct { + stateNumber int + configs *ATNConfigSet + + // edges elements point to the target of the symbol. Shift up by 1 so (-1) + // Token.EOF maps to the first element. + edges []*DFAState + + isAcceptState bool + + // prediction is the 'ttype' we match or alt we predict if the state is 'accept'. + // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or + // requiresFullContext. 
+ prediction int + + lexerActionExecutor *LexerActionExecutor + + // requiresFullContext indicates it was created during an SLL prediction that + // discovered a conflict between the configurations in the state. Future + // ParserATNSimulator.execATN invocations immediately jump doing + // full context prediction if true. + requiresFullContext bool + + // predicates is the predicates associated with the ATN configurations of the + // DFA state during SLL parsing. When we have predicates, requiresFullContext + // is false, since full context prediction evaluates predicates on-the-fly. If + // d is + // not nil, then prediction is ATN.INVALID_ALT_NUMBER. + // + // We only use these for non-requiresFullContext but conflicting states. That + // means we know from the context (it's $ or we don't dip into outer context) + // that it's an ambiguity not a conflict. + // + // This list is computed by + // ParserATNSimulator.predicateDFAState. + predicates []*PredPrediction +} + +func NewDFAState(stateNumber int, configs *ATNConfigSet) *DFAState { + if configs == nil { + configs = NewATNConfigSet(false) + } + + return &DFAState{configs: configs, stateNumber: stateNumber} +} + +// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. +func (d *DFAState) GetAltSet() []int { + var alts []int + + if d.configs != nil { + for _, c := range d.configs.configs { + alts = append(alts, c.GetAlt()) + } + } + + if len(alts) == 0 { + return nil + } + + return alts +} + +func (d *DFAState) getEdges() []*DFAState { + return d.edges +} + +func (d *DFAState) numEdges() int { + return len(d.edges) +} + +func (d *DFAState) getIthEdge(i int) *DFAState { + return d.edges[i] +} + +func (d *DFAState) setEdges(newEdges []*DFAState) { + d.edges = newEdges +} + +func (d *DFAState) setIthEdge(i int, edge *DFAState) { + d.edges[i] = edge +} + +func (d *DFAState) setPrediction(v int) { + d.prediction = v +} + +func (d *DFAState) String() string { + var s string + if d.isAcceptState { + if d.predicates != nil { + s = "=>" + fmt.Sprint(d.predicates) + } else { + s = "=>" + fmt.Sprint(d.prediction) + } + } + + return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s) +} + +func (d *DFAState) Hash() int { + h := murmurInit(7) + h = murmurUpdate(h, d.configs.Hash()) + return murmurFinish(h, 1) +} + +// Equals returns whether d equals other. Two DFAStates are equal if their ATN +// configuration sets are the same. This method is used to see if a state +// already exists. +// +// Because the number of alternatives and number of ATN configurations are +// finite, there is a finite number of DFA states that can be processed. This is +// necessary to show that the algorithm terminates. +// +// Cannot test the DFA state numbers here because in +// ParserATNSimulator.addDFAState we need to know if any other state exists that +// has d exact set of ATN configurations. The stateNumber is irrelevant. +func (d *DFAState) Equals(o Collectable[*DFAState]) bool { + if d == o { + return true + } + + return d.configs.Equals(o.(*DFAState).configs) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go new file mode 100644 index 00000000..bd2cd8bc --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/diagnostic_error_listener.go @@ -0,0 +1,110 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. 
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"strconv"
+)
+
+// This implementation of [ErrorListener] can be used to identify
+// certain potential correctness and performance problems in grammars. Reports
+// are made by calling [Parser.NotifyErrorListeners] with the appropriate
+// message:
+//
+//   - Ambiguities: These are cases where more than one path through the
+//     grammar can Match the input.
+//   - Weak context sensitivity: These are cases where full-context
+//     prediction resolved an SLL conflict to a unique alternative which equaled the
+//     minimum alternative of the SLL conflict.
+//   - Strong (forced) context sensitivity: These are cases where the
+//     full-context prediction resolved an SLL conflict to a unique alternative,
+//     and the minimum alternative of the SLL conflict was found to not be
+//     a truly viable alternative. Two-stage parsing cannot be used for inputs where
+//     this situation occurs.
+ +type DiagnosticErrorListener struct { + *DefaultErrorListener + + exactOnly bool +} + +//goland:noinspection GoUnusedExportedFunction +func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { + + n := new(DiagnosticErrorListener) + + // whether all ambiguities or only exact ambiguities are Reported. + n.exactOnly = exactOnly + return n +} + +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + if d.exactOnly && !exact { + return + } + msg := "reportAmbiguity d=" + + d.getDecisionDescription(recognizer, dfa) + + ": ambigAlts=" + + d.getConflictingAlts(ambigAlts, configs).String() + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, _ *BitSet, _ *ATNConfigSet) { + + msg := "reportAttemptingFullContext d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, _ int, _ *ATNConfigSet) { + msg := "reportContextSensitivity d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { + decision := dfa.decision + ruleIndex := dfa.atnStartState.GetRuleIndex() + + ruleNames := recognizer.GetRuleNames() + if ruleIndex < 0 || ruleIndex >= len(ruleNames) { + return strconv.Itoa(decision) + } + ruleName := ruleNames[ruleIndex] + if ruleName == "" { + return strconv.Itoa(decision) + } + return strconv.Itoa(decision) + " (" + ruleName + ")" +} + +// Computes the set of conflicting or ambiguous alternatives from a +// configuration set, if that information was not already provided by the +// parser. +// +// @param ReportedAlts The set of conflicting or ambiguous alternatives, as +// Reported by the parser. +// @param configs The conflicting or ambiguous configuration set. +// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise +// returns the set of alternatives represented in {@code configs}. +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set *ATNConfigSet) *BitSet { + if ReportedAlts != nil { + return ReportedAlts + } + result := NewBitSet() + for _, c := range set.configs { + result.add(c.GetAlt()) + } + + return result +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_listener.go b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go new file mode 100644 index 00000000..21a02164 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/error_listener.go @@ -0,0 +1,100 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. 
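In practice this listener is attached while developing a grammar. A sketch, assuming MyParser is an ANTLR-generated parser exposing the usual AddErrorListener hook from its embedded recognizer:

p := NewMyParser(tokens) // hypothetical generated constructor
p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) // report exact ambiguities only
// invoke the grammar's entry rule here, e.g. p.Prog()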
The +// default implementation of each method does nothing, but can be overridden as +// necessary. + +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +//goland:noinspection GoUnusedExportedFunction +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(_ Recognizer, _ interface{}, _, _ int, _ string, _ RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(_ Parser, _ *DFA, _, _ int, _ bool, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(_ Parser, _ *DFA, _, _ int, _ *BitSet, _ *ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(_ Parser, _ *DFA, _, _, _ int, _ *ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// ConsoleErrorListenerINSTANCE provides a default instance of {@link ConsoleErrorListener}. +var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() + +// SyntaxError prints messages to System.err containing the +// values of line, charPositionInLine, and msg using +// the following format: +// +// line : +func (c *ConsoleErrorListener) SyntaxError(_ Recognizer, _ interface{}, line, column int, msg string, _ RecognitionException) { + _, _ = fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +} + +type ProxyErrorListener struct { + *DefaultErrorListener + delegates []ErrorListener +} + +func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { + if delegates == nil { + panic("delegates is not provided") + } + l := new(ProxyErrorListener) + l.delegates = delegates + return l +} + +func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + for _, d := range p.delegates { + d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) + } +} + +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs *ATNConfigSet) { + for _, d := range p.delegates { + d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go 
b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go new file mode 100644 index 00000000..9db2be1c --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/error_strategy.go @@ -0,0 +1,702 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type ErrorStrategy interface { + reset(Parser) + RecoverInline(Parser) Token + Recover(Parser, RecognitionException) + Sync(Parser) + InErrorRecoveryMode(Parser) bool + ReportError(Parser, RecognitionException) + ReportMatch(Parser) +} + +// DefaultErrorStrategy is the default implementation of ANTLRErrorStrategy used for +// error reporting and recovery in ANTLR parsers. +type DefaultErrorStrategy struct { + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet +} + +var _ ErrorStrategy = &DefaultErrorStrategy{} + +func NewDefaultErrorStrategy() *DefaultErrorStrategy { + + d := new(DefaultErrorStrategy) + + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress Reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //InErrorRecoveryMode + // + d.errorRecoveryMode = false + + // The index into the input stream where the last error occurred. + // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseam. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. + // + d.lastErrorIndex = -1 + d.lastErrorStates = nil + return d +} + +//

The default implementation simply calls [endErrorCondition] to +// ensure that the handler is not in error recovery mode.

+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +func (d *DefaultErrorStrategy) beginErrorCondition(_ Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) InErrorRecoveryMode(_ Parser) bool { + return d.errorRecoveryMode +} + +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +func (d *DefaultErrorStrategy) endErrorCondition(_ Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// ReportMatch is the default implementation of error matching and simply calls endErrorCondition. +func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// ReportError is the default implementation of error reporting. +// It returns immediately if the handler is already +// in error recovery mode. Otherwise, it calls [beginErrorCondition] +// and dispatches the Reporting task based on the runtime type of e +// according to the following table. +// +// [NoViableAltException] : Dispatches the call to [ReportNoViableAlternative] +// [InputMisMatchException] : Dispatches the call to [ReportInputMisMatch] +// [FailedPredicateException] : Dispatches the call to [ReportFailedPredicate] +// All other types : Calls [NotifyErrorListeners] to Report the exception +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.InErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// Recover is the default recovery implementation. +// It reSynchronizes the parser by consuming tokens until we find one in the reSynchronization set - +// loosely the set of tokens that can follow the current rule. +func (d *DefaultErrorStrategy) Recover(recognizer Parser, _ RecognitionException) { + + if d.lastErrorIndex == recognizer.GetInputStream().Index() && + d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) { + // uh oh, another error at same token index and previously-Visited + // state in ATN must be a case where LT(1) is in the recovery + // token set so nothing got consumed. Consume a single token + // at least to prevent an infinite loop d is a failsafe. + recognizer.Consume() + } + d.lastErrorIndex = recognizer.GetInputStream().Index() + if d.lastErrorStates == nil { + d.lastErrorStates = NewIntervalSet() + } + d.lastErrorStates.addOne(recognizer.GetState()) + followSet := d.GetErrorRecoverySet(recognizer) + d.consumeUntil(recognizer, followSet) +} + +// Sync is the default implementation of error strategy synchronization. +// +// This Sync makes sure that the current lookahead symbol is consistent with what were expecting +// at this point in the [ATN]. 
You can call this anytime but ANTLR only +// generates code to check before sub-rules/loops and each iteration. +// +// Implements [Jim Idle]'s magic Sync mechanism in closures and optional +// sub-rules. E.g.: +// +// a : Sync ( stuff Sync )* +// Sync : {consume to what can follow Sync} +// +// At the start of a sub-rule upon error, Sync performs single +// token deletion, if possible. If it can't do that, it bails on the current +// rule and uses the default error recovery, which consumes until the +// reSynchronization set of the current rule. +// +// If the sub-rule is optional +// +// ({@code (...)?}, {@code (...)*}, +// +// or a block with an empty alternative), then the expected set includes what follows +// the sub-rule. +// +// During loop iteration, it consumes until it sees a token that can start a +// sub-rule or what follows loop. Yes, that is pretty aggressive. We opt to +// stay in the loop as long as possible. +// +// # Origins +// +// Previous versions of ANTLR did a poor job of their recovery within loops. +// A single mismatch token or missing token would force the parser to bail +// out of the entire rules surrounding the loop. So, for rule: +// +// classfunc : 'class' ID '{' member* '}' +// +// input with an extra token between members would force the parser to +// consume until it found the next class definition rather than the next +// member definition of the current class. +// +// This functionality cost a bit of effort because the parser has to +// compare the token set at the start of the loop and at each iteration. If for +// some reason speed is suffering for you, you can turn off this +// functionality by simply overriding this method as empty: +// +// { } +// +// [Jim Idle]: https://github.com/jimidle +func (d *DefaultErrorStrategy) Sync(recognizer Parser) { + // If already recovering, don't try to Sync + if d.InErrorRecoveryMode(recognizer) { + return + } + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + la := recognizer.GetTokenStream().LA(1) + + // try cheaper subset first might get lucky. seems to shave a wee bit off + nextTokens := recognizer.GetATN().NextTokens(s, nil) + if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) { + return + } + + switch s.GetStateType() { + case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry: + // Report error and recover if possible + if d.SingleTokenDeletion(recognizer) != nil { + return + } + recognizer.SetError(NewInputMisMatchException(recognizer)) + case ATNStatePlusLoopBack, ATNStateStarLoopBack: + d.ReportUnwantedToken(recognizer) + expecting := NewIntervalSet() + expecting.addSet(recognizer.GetExpectedTokens()) + whatFollowsLoopIterationOrRule := expecting.addSet(d.GetErrorRecoverySet(recognizer)) + d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule) + default: + // do nothing if we can't identify the exact kind of ATN state + } +} + +// ReportNoViableAlternative is called by [ReportError] when the exception is a [NoViableAltException]. 
+// +// See also [ReportError] +func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) { + tokens := recognizer.GetTokenStream() + var input string + if tokens != nil { + if e.startToken.GetTokenType() == TokenEOF { + input = "" + } else { + input = tokens.GetTextFromTokens(e.startToken, e.offendingToken) + } + } else { + input = "" + } + msg := "no viable alternative at input " + d.escapeWSAndQuote(input) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportInputMisMatch is called by [ReportError] when the exception is an [InputMisMatchException] +// +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) { + msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) + + " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportFailedPredicate is called by [ReportError] when the exception is a [FailedPredicateException]. +// +// See also: [ReportError] +func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) { + ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()] + msg := "rule " + ruleName + " " + e.message + recognizer.NotifyErrorListeners(msg, e.offendingToken, e) +} + +// ReportUnwantedToken is called to report a syntax error that requires the removal +// of a token from the input stream. At the time d method is called, the +// erroneous symbol is the current LT(1) symbol and has not yet been +// removed from the input stream. When this method returns, +// recognizer is in error recovery mode. +// +// This method is called when singleTokenDeletion identifies +// single-token deletion as a viable recovery strategy for a mismatched +// input error. +// +// The default implementation simply returns if the handler is already in +// error recovery mode. Otherwise, it calls beginErrorCondition to +// enter error recovery mode, followed by calling +// [NotifyErrorListeners] +func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) { + if d.InErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + tokenName := d.GetTokenErrorDisplay(t) + expecting := d.GetExpectedTokens(recognizer) + msg := "extraneous input " + tokenName + " expecting " + + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +// ReportMissingToken is called to report a syntax error which requires the +// insertion of a missing token into the input stream. At the time this +// method is called, the missing token has not yet been inserted. When this +// method returns, recognizer is in error recovery mode. +// +// This method is called when singleTokenInsertion identifies +// single-token insertion as a viable recovery strategy for a mismatched +// input error. +// +// The default implementation simply returns if the handler is already in +// error recovery mode. 
+// Otherwise, it calls beginErrorCondition to enter error recovery mode,
+// followed by calling [NotifyErrorListeners].
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+	if d.InErrorRecoveryMode(recognizer) {
+		return
+	}
+	d.beginErrorCondition(recognizer)
+	t := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+		" at " + d.GetTokenErrorDisplay(t)
+	recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The RecoverInline default implementation attempts to recover from the mismatched input
+// by using single-token insertion and deletion as described below. If the
+// recovery attempt fails, this method sets an [InputMisMatchException] on the recognizer.
+// TODO: Not sure that panic() is the right thing to do here - JI
+//
+// # EXTRA TOKEN (single-token deletion)
+//
+// LA(1) is not what we are looking for. If LA(2) has the
+// right token, however, then assume LA(1) is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the LA(2) token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenDeletion].
+//
+// # MISSING TOKEN (single-token insertion)
+//
+// If the current token - at LA(1) - is consistent with what could come
+// after the expected LA(1) token, then assume the token is missing
+// and use the parser's [TokenFactory] to create it on the fly. The
+// “insertion” is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by [SingleTokenInsertion].
+//
+// # Example
+//
+// For example, the input i=(3 is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// the call chain:
+//
+//	stat → expr → atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+//	: ID '=' '(' INT ')' ('+' atom)* ';'
+//	                  ^
+//
+// The attempt to [Match] ')' will fail when it sees ';' and
+// call [RecoverInline]. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+	// SINGLE TOKEN DELETION
+	MatchedSymbol := d.SingleTokenDeletion(recognizer)
+	if MatchedSymbol != nil {
+		// we have deleted the extra token.
+		// now, move past ttype token as if all were ok
+		recognizer.Consume()
+		return MatchedSymbol
+	}
+	// SINGLE TOKEN INSERTION
+	if d.SingleTokenInsertion(recognizer) {
+		return d.GetMissingSymbol(recognizer)
+	}
+	// even that didn't work, so set a mismatch error on the recognizer
+	recognizer.SetError(NewInputMisMatchException(recognizer))
+	return nil
+}
+
+// SingleTokenInsertion implements the single-token insertion inline error recovery
+// strategy. It is called by [RecoverInline] if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns true, recognizer will be in error recovery
+// mode.
+//
+// This method determines whether single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched
+// if it were instead the LA(2) symbol. If this method returns
+// true, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.

+//
+// This func returns true if single-token insertion is a viable recovery
+// strategy for the current mismatched input.
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+	currentSymbolType := recognizer.GetTokenStream().LA(1)
+	// If the current token is consistent with what could come after the
+	// current ATN state, then we know we're missing a token; error recovery
+	// is free to conjure up and insert the missing token.
+	atn := recognizer.GetInterpreter().atn
+	currentState := atn.states[recognizer.GetState()]
+	next := currentState.GetTransitions()[0].getTarget()
+	expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+	if expectingAtLL2.contains(currentSymbolType) {
+		d.ReportMissingToken(recognizer)
+		return true
+	}
+
+	return false
+}
+
+// SingleTokenDeletion implements the single-token deletion inline error recovery
+// strategy. It is called by [RecoverInline] to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// recognizer will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// [ReportUnwantedToken] to Report the error, followed by
+// [Consume] to actually “delete” the extraneous token. Then,
+// before returning, [ReportMatch] is called to signal a successful
+// Match.
+//
+// The func returns the successfully Matched [Token] instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise nil.
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+	NextTokenType := recognizer.GetTokenStream().LA(2)
+	expecting := d.GetExpectedTokens(recognizer)
+	if expecting.contains(NextTokenType) {
+		d.ReportUnwantedToken(recognizer)
+		// print("recoverFromMisMatchedToken deleting " \
+		//	+ str(recognizer.GetTokenStream().LT(1)) \
+		//	+ " since " + str(recognizer.GetTokenStream().LT(2)) \
+		//	+ " is what we want", file=sys.stderr)
+		recognizer.Consume() // simply delete extra token
+		// we want to return the token we're actually Matching
+		MatchedSymbol := recognizer.GetCurrentToken()
+		d.ReportMatch(recognizer) // we know current token is correct
+		return MatchedSymbol
+	}
+
+	return nil
+}
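The docs around these two methods invite users to customize single-token recovery. One Go-specific caveat (our observation, not from the vendored comments): methods promoted through embedding are not dispatched virtually, so the embedded strategy's own `RecoverInline` keeps calling its own `GetMissingSymbol`; to hook the conjuring step you must re-state the caller as well. A hedged sketch mirroring the default flow shown above:

```go
package demo

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// VerboseMissing logs the token conjured during single-token insertion.
// Because Go embedding is not virtual dispatch, the embedded
// DefaultErrorStrategy's RecoverInline would never call an "overridden"
// GetMissingSymbol, so RecoverInline is re-stated here with the same shape
// as the default implementation above.
type VerboseMissing struct {
	*antlr.DefaultErrorStrategy
}

func (v *VerboseMissing) RecoverInline(recognizer antlr.Parser) antlr.Token {
	// Try single-token deletion first, exactly like the default strategy.
	if tok := v.SingleTokenDeletion(recognizer); tok != nil {
		recognizer.Consume()
		return tok
	}
	// Then single-token insertion, peeking at the conjured token.
	if v.SingleTokenInsertion(recognizer) {
		missing := v.GetMissingSymbol(recognizer)
		fmt.Println("conjured missing token:", missing.GetText())
		return missing
	}
	// Neither worked: record the mismatch, as the default does.
	recognizer.SetError(antlr.NewInputMisMatchException(recognizer))
	return nil
}
```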
+
+// GetMissingSymbol conjures up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example:
+//
+//	x=ID {f($x)}.
+//
+// The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing, and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a [CommonToken] of the appropriate type. The text will be the token name.
+// If you need to change which tokens are created, override this
+// method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+	currentSymbol := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	expectedTokenType := expecting.first()
+	var tokenText string
+
+	if expectedTokenType == TokenEOF {
+		tokenText = "<missing EOF>"
+	} else {
+		ln := recognizer.GetLiteralNames()
+		if expectedTokenType > 0 && expectedTokenType < len(ln) {
+			tokenText = "<missing " + ln[expectedTokenType] + ">"
+		} else {
+			tokenText = "<missing undefined>" // TODO: matches the JS impl
+		}
+	}
+	current := currentSymbol
+	lookback := recognizer.GetTokenStream().LT(-1)
+	if current.GetTokenType() == TokenEOF && lookback != nil {
+		current = lookback
+	}
+
+	tf := recognizer.GetTokenFactory()
+
+	return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+	return recognizer.GetExpectedTokens()
+}
+
+// GetTokenErrorDisplay determines how a token should be displayed in an error message.
+// The default is to display just the text, but during development you might
+// want to have a lot of information spit out. Override this func in that case
+// to use t.String() (which, for [CommonToken], dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+		} else {
+			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+		}
+	}
+	return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+	s = strings.Replace(s, "\t", "\\t", -1)
+	s = strings.Replace(s, "\n", "\\n", -1)
+	s = strings.Replace(s, "\r", "\\r", -1)
+	return "'" + s + "'"
+}
+
+// GetErrorRecoverySet computes the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+//
+// This local follow set only includes tokens
+// from within the rule, i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// # Example
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider the grammar:
+//
+//	a : '[' b ']'
+//	  | '(' b ')'
+//	  ;
+//
+//	b : c '^' INT
+//	  ;
+//
+//	c : ID
+//	  | INT
+//	  ;
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack.
Here are the various +// context-sensitive follow sets: +// +// FOLLOW(b1_in_a) = FIRST(']') = ']' +// FOLLOW(b2_in_a) = FIRST(')') = ')' +// FOLLOW(c_in_b) = FIRST('^') = '^' +// +// Upon erroneous input “[]”, the call chain is +// +// a → b → c +// +// and, hence, the follow context stack is: +// +// Depth Follow set Start of rule execution +// 0 a (from main()) +// 1 ']' b +// 2 '^' c +// +// Notice that ')' is not included, because b would have to have +// been called from a different context in rule a for ')' to be +// included. +// +// For error recovery, we cannot consider FOLLOW(c) +// (context-sensitive or otherwise). We need the combined set of +// all context-sensitive FOLLOW sets - the set of all tokens that +// could follow any reference in the call chain. We need to +// reSync to one of those tokens. Note that FOLLOW(c)='^' and if +// we reSync'd to that token, we'd consume until EOF. We need to +// Sync to context-sensitive FOLLOWs for a, b, and c: +// +// {']','^'} +// +// In this case, for input "[]", LA(1) is ']' and in the set, so we would +// not consume anything. After printing an error, rule c would +// return normally. Rule b would not find the required '^' though. +// At this point, it gets a mismatched token error and panics an +// exception (since LA(1) is not in the viable following token +// set). The rule exception handler tries to recover, but finds +// the same recovery set and doesn't consume anything. Rule b +// exits normally returning to rule a. Now it finds the ']' (and +// with the successful Match exits errorRecovery mode). +// +// So, you can see that the parser walks up the call chain looking +// for the token that was a member of the recovery set. +// +// Errors are not generated in errorRecovery mode. +// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// [Algorithms + Data Structures = Programs] by Niklaus Wirth and +// [A note on error recovery in recursive descent parsers]. +// +// Later, Josef Grosch had some good ideas in [Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers] +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined at run-time upon error to avoid overhead +// during parsing. 
Later, the runtime Sync was improved for loops and sub-rules; see the [Sync] docs.
+//
+// [A note on error recovery in recursive descent parsers]: http://portal.acm.org/citation.cfm?id=947902.947905
+// [Algorithms + Data Structures = Programs]: https://t.ly/5QzgE
+// [Efficient and Comfortable Error Recovery in Recursive Descent Parsers]: ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+func (d *DefaultErrorStrategy) GetErrorRecoverySet(recognizer Parser) *IntervalSet {
+	atn := recognizer.GetInterpreter().atn
+	ctx := recognizer.GetParserRuleContext()
+	recoverSet := NewIntervalSet()
+	for ctx != nil && ctx.GetInvokingState() >= 0 {
+		// compute what follows whoever invoked us
+		invokingState := atn.states[ctx.GetInvokingState()]
+		rt := invokingState.GetTransitions()[0]
+		follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+		recoverSet.addSet(follow)
+		ctx = ctx.GetParent().(ParserRuleContext)
+	}
+	recoverSet.removeOne(TokenEpsilon)
+	return recoverSet
+}
+
+// consumeUntil consumes tokens until one Matches the given token set.
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+	ttype := recognizer.GetTokenStream().LA(1)
+	for ttype != TokenEOF && !set.contains(ttype) {
+		recognizer.Consume()
+		ttype = recognizer.GetTokenStream().LA(1)
+	}
+}
+
+// The BailErrorStrategy implementation of ANTLRErrorStrategy responds to syntax errors
+// by immediately canceling the parse operation with a
+// [ParseCancellationException]. The implementation ensures that the
+// exception field of [ParserRuleContext] is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+// This error strategy is useful in the following scenarios.
+//
+//   - Two-stage parsing: This error strategy allows the first
+//     stage of two-stage parsing to immediately terminate if an error is
+//     encountered, and immediately fall back to the second stage. In addition to
+//     avoiding wasted work by attempting to recover from errors here, the empty
+//     implementation of [BailErrorStrategy.Sync] improves the performance of
+//     the first stage. (A sketch of the two-stage pattern follows below.)
+//
+//   - Silent validation: When syntax errors are not being
+//     Reported or logged, and the parse result is simply ignored if errors occur,
+//     the [BailErrorStrategy] avoids wasting work on recovering from errors
+//     when the result will be ignored either way.
+//
+//	myparser.SetErrorHandler(NewBailErrorStrategy())
+//
+// See also: [Parser.SetErrorHandler]
+type BailErrorStrategy struct {
+	*DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+	b := new(BailErrorStrategy)
+
+	b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+	return b
+}
+
+// Recover, instead of recovering from exception e, wraps it in a
+// [ParseCancellationException] so it is not caught by the rule function's
+// catch clauses. Use Exception.GetCause() to get the original
+// [RecognitionException].
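The two-stage pattern referenced in the BailErrorStrategy comment, as a hedged sketch. The parser interface is a stand-in for a generated parser with a hypothetical start rule `Expr`; `HasError` is assumed to be the accessor this Go port pairs with `SetError` instead of panicking (worth verifying against your runtime version):

```go
package demo

import "github.com/antlr4-go/antlr/v4"

// exprParser stands in for a generated parser: Expr is a hypothetical start
// rule, and HasError is the assumed error accessor paired with SetError.
type exprParser interface {
	antlr.Parser
	HasError() bool
	Expr() antlr.ParseTree
}

// parseTwoStage tries a fast, bail-on-first-error parse and only falls back
// to full recovery when that fails.
func parseTwoStage(newParser func() exprParser) (antlr.ParseTree, bool) {
	// Stage 1: BailErrorStrategy cancels at the first syntax error, and its
	// empty Sync skips the per-loop recovery bookkeeping.
	p := newParser()
	p.SetErrorHandler(antlr.NewBailErrorStrategy())
	if tree := p.Expr(); !p.HasError() {
		return tree, true
	}

	// Stage 2: re-parse from scratch with the default strategy for full
	// recovery and error reporting.
	p = newParser()
	p.SetErrorHandler(antlr.NewDefaultErrorStrategy())
	tree := p.Expr()
	return tree, !p.HasError()
}
```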
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + if parent, ok := context.GetParent().(ParserRuleContext); ok { + context = parent + } else { + context = nil + } + } + recognizer.SetError(NewParseCancellationException()) // TODO: we don't emit e properly +} + +// RecoverInline makes sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Sync makes sure we don't attempt to recover from problems in sub-rules. +func (b *BailErrorStrategy) Sync(_ Parser) { +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/errors.go b/vendor/github.com/antlr4-go/antlr/v4/errors.go new file mode 100644 index 00000000..8f0f2f60 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/errors.go @@ -0,0 +1,259 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO: may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + + // The current Token when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + // + t.offendingToken = nil + + // Get the ATN state number the parser was in at the time the error + // occurred. For NoViableAltException and LexerNoViableAltException exceptions, this is the + // DecisionState number. For others, it is the state whose outgoing edge we couldn't Match. + // + t.offendingState = -1 + if t.recognizer != nil { + t.offendingState = t.recognizer.GetState() + } + + return t +} + +func (b *BaseRecognitionException) GetMessage() string { + return b.message +} + +func (b *BaseRecognitionException) GetOffendingToken() Token { + return b.offendingToken +} + +func (b *BaseRecognitionException) GetInputStream() IntStream { + return b.input +} + +//
If the state number is not known, this method returns -1.
+
+// getExpectedTokens gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns nil.
+//
+// The func returns the set of token types that could potentially follow the current
+// state in the [ATN], or nil if the information is not available.
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+	if b.recognizer != nil {
+		return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+	}
+
+	return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+	return b.message
+}
+
+type LexerNoViableAltException struct {
+	*BaseRecognitionException
+
+	startIndex     int
+	deadEndConfigs *ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs *ATNConfigSet) *LexerNoViableAltException {
+
+	l := new(LexerNoViableAltException)
+
+	l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+	l.startIndex = startIndex
+	l.deadEndConfigs = deadEndConfigs
+
+	return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+	symbol := ""
+	if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+		symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+	}
+	return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+	*BaseRecognitionException
+
+	startToken     Token
+	offendingToken Token
+	ctx            ParserRuleContext
+	deadEndConfigs *ATNConfigSet
+}
+
+// NewNoViableAltException creates an exception indicating that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred.
+//
+// Reported by [ReportNoViableAlternative]
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs *ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+	if ctx == nil {
+		ctx = recognizer.GetParserRuleContext()
+	}
+
+	if offendingToken == nil {
+		offendingToken = recognizer.GetCurrentToken()
+	}
+
+	if startToken == nil {
+		startToken = recognizer.GetCurrentToken()
+	}
+
+	if input == nil {
+		input = recognizer.GetInputStream().(TokenStream)
+	}
+
+	n := new(NoViableAltException)
+	n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+	// Which configurations did we try at input.Index() that couldn't Match
+	// input.LT(1)?
+	n.deadEndConfigs = deadEndConfigs
+
+	// The token object at the start index; the input stream might
+	// not be buffering tokens, so get a reference to it.
+	//
+	// At the time the error occurred, of course the stream needs to keep a
+	// buffer of all the tokens, but later we might not have access to those.
+	n.startToken = startToken
+	n.offendingToken = offendingToken
+
+	return n
+}
+
+type InputMisMatchException struct {
+	*BaseRecognitionException
+}
+
+// NewInputMisMatchException creates an exception that signifies any kind of mismatched input
+// exception, such as when the current input does not Match the expected token.
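The three exception kinds described at the top of this file can be told apart at reporting time. A sketch of a custom listener, assuming the runtime's standard `ErrorListener` signature and `DefaultErrorListener` embedding:

```go
package demo

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// kindListener distinguishes the three RecognitionException kinds.
// Embedding DefaultErrorListener supplies the remaining ErrorListener
// methods (ambiguity and context-sensitivity reports).
type kindListener struct {
	*antlr.DefaultErrorListener
}

func (k *kindListener) SyntaxError(_ antlr.Recognizer, _ interface{}, line, column int, msg string, e antlr.RecognitionException) {
	switch e.(type) {
	case *antlr.NoViableAltException:
		fmt.Printf("%d:%d prediction error: %s\n", line, column, msg)
	case *antlr.InputMisMatchException:
		fmt.Printf("%d:%d mismatched input: %s\n", line, column, msg)
	case *antlr.FailedPredicateException:
		fmt.Printf("%d:%d failed predicate: %s\n", line, column, msg)
	default:
		// e is nil for extraneous/missing-token reports, which are
		// delivered with a message only (see ReportUnwantedToken above).
		fmt.Printf("%d:%d syntax error: %s\n", line, column, msg)
	}
}
```

Attach it with `parser.RemoveErrorListeners()` followed by `parser.AddErrorListener(&kindListener{antlr.NewDefaultErrorListener()})`.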
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// FailedPredicateException indicates that a semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func (p ParseCancellationException) GetOffendingToken() Token { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetMessage() string { + //TODO implement me + panic("implement me") +} + +func (p ParseCancellationException) GetInputStream() IntStream { + //TODO implement me + panic("implement me") +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/file_stream.go b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go new file mode 100644 index 00000000..5f65f809 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/file_stream.go @@ -0,0 +1,67 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. 
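For orientation, the three character-stream constructors defined in this file and in input_stream.go below, all of which read the whole input into a rune buffer before lexing starts. The file name is a placeholder:

```go
package demo

import (
	"fmt"
	"log"
	"strings"

	"github.com/antlr4-go/antlr/v4"
)

// streamsDemo builds a character stream three ways; each buffers the full
// input as runes up front. "input.dsl" is a hypothetical file name.
func streamsDemo() {
	s1 := antlr.NewInputStream("a = 1;")        // from an in-memory string
	s2, err := antlr.NewFileStream("input.dsl") // whole file, read eagerly
	if err != nil {
		log.Fatal(err)
	}
	s3 := antlr.NewIoStream(strings.NewReader("b = 2;")) // any finite io.Reader
	fmt.Println(s1.Size(), s2.Size(), s3.Size())         // sizes in runes
}
```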
+ +type FileStream struct { + InputStream + filename string +} + +//goland:noinspection GoUnusedExportedFunction +func NewFileStream(fileName string) (*FileStream, error) { + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + + defer func(f *os.File) { + errF := f.Close() + if errF != nil { + } + }(f) + + reader := bufio.NewReader(f) + fInfo, err := f.Stat() + if err != nil { + return nil, err + } + + fs := &FileStream{ + InputStream: InputStream{ + index: 0, + name: fileName, + }, + filename: fileName, + } + + // Pre-build the buffer and read runes efficiently + // + fs.data = make([]rune, 0, fInfo.Size()) + for { + r, _, err := reader.ReadRune() + if err != nil { + break + } + fs.data = append(fs.data, r) + } + fs.size = len(fs.data) // Size in runes + + // All done. + // + return fs, nil +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/input_stream.go b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go new file mode 100644 index 00000000..ab4e96be --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/input_stream.go @@ -0,0 +1,157 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bufio" + "io" +) + +type InputStream struct { + name string + index int + data []rune + size int +} + +// NewIoStream creates a new input stream from the given io.Reader reader. +// Note that the reader is read completely into memory and so it must actually +// have a stopping point - you cannot pass in a reader on an open-ended source such +// as a socket for instance. +func NewIoStream(reader io.Reader) *InputStream { + + rReader := bufio.NewReader(reader) + + is := &InputStream{ + name: "", + index: 0, + } + + // Pre-build the buffer and read runes reasonably efficiently given that + // we don't exactly know how big the input is. 
+	//
+	is.data = make([]rune, 0, 512)
+	for {
+		r, _, err := rReader.ReadRune()
+		if err != nil {
+			break
+		}
+		is.data = append(is.data, r)
+	}
+	is.size = len(is.data) // number of runes
+	return is
+}
+
+// NewInputStream creates a new input stream from the given string.
+func NewInputStream(data string) *InputStream {
+
+	is := &InputStream{
+		name:  "",
+		index: 0,
+		data:  []rune(data), // This is actually the most efficient way
+	}
+	is.size = len(is.data) // number of runes, but we could also use len(data), which is efficient too
+	return is
+}
+
+func (is *InputStream) reset() {
+	is.index = 0
+}
+
+// Consume moves the input pointer to the next character in the input stream.
+func (is *InputStream) Consume() {
+	if is.index >= is.size {
+		// assert is.LA(1) == TokenEOF
+		panic("cannot consume EOF")
+	}
+	is.index++
+}
+
+// LA returns the character at the given lookahead offset relative to the
+// current position: LA(1) is the next character, LA(-1) the previous one.
+// It returns TokenEOF when the offset falls outside the buffer.
+func (is *InputStream) LA(offset int) int {
+
+	if offset == 0 {
+		return 0 // nil
+	}
+	if offset < 0 {
+		offset++ // e.g., translate LA(-1) to use offset=0
+	}
+	pos := is.index + offset - 1
+
+	if pos < 0 || pos >= is.size { // invalid
+		return TokenEOF
+	}
+
+	return int(is.data[pos])
+}
+
+// LT is an alias of LA for character streams.
+func (is *InputStream) LT(offset int) int {
+	return is.LA(offset)
+}
+
+// Index returns the current offset into the input stream.
+func (is *InputStream) Index() int {
+	return is.index
+}
+
+// Size returns the total number of characters in the input stream.
+func (is *InputStream) Size() int {
+	return is.size
+}
+
+// Mark does nothing here as we have the entire buffer.
+func (is *InputStream) Mark() int {
+	return -1
+}
+
+// Release does nothing here as we have the entire buffer.
+func (is *InputStream) Release(_ int) {
+}
+
+// Seek moves the input pointer to the provided index.
+func (is *InputStream) Seek(index int) {
+	if index <= is.index {
+		is.index = index // just jump; don't update stream state (line, ...)
+		return
+	}
+	// seek forward
+	is.index = intMin(index, is.size)
+}
+
+// GetText returns the text from the input stream from the start to the stop index.
+func (is *InputStream) GetText(start int, stop int) string {
+	if stop >= is.size {
+		stop = is.size - 1
+	}
+	if start >= is.size {
+		return ""
+	}
+
+	return string(is.data[start : stop+1])
+}
+
+// GetTextFromTokens returns the text from the input stream from the first character of the start token to the last
+// character of the stop token.
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+	if start != nil && stop != nil {
+		return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+	}
+
+	return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i Interval) string {
+	return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+	return "Obtained from string"
+}
+
+// String returns the entire input stream as a string.
+func (is *InputStream) String() string {
+	return string(is.data)
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/int_stream.go b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
new file mode 100644
index 00000000..4778878b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+ +package antlr + +type IntStream interface { + Consume() + LA(int) int + Mark() int + Release(marker int) + Index() int + Seek(index int) + Size() int + GetSourceName() string +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/interval_set.go b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go new file mode 100644 index 00000000..cc506606 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/interval_set.go @@ -0,0 +1,330 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type Interval struct { + Start int + Stop int +} + +// NewInterval creates a new interval with the given start and stop values. +func NewInterval(start, stop int) Interval { + return Interval{ + Start: start, + Stop: stop, + } +} + +// Contains returns true if the given item is contained within the interval. +func (i Interval) Contains(item int) bool { + return item >= i.Start && item < i.Stop +} + +// String generates a string representation of the interval. +func (i Interval) String() string { + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) + } + + return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) +} + +// Length returns the length of the interval. +func (i Interval) Length() int { + return i.Stop - i.Start +} + +// IntervalSet represents a collection of [Intervals], which may be read-only. +type IntervalSet struct { + intervals []Interval + readOnly bool +} + +// NewIntervalSet creates a new empty, writable, interval set. +func NewIntervalSet() *IntervalSet { + + i := new(IntervalSet) + + i.intervals = nil + i.readOnly = false + + return i +} + +func (i *IntervalSet) Equals(other *IntervalSet) bool { + if len(i.intervals) != len(other.intervals) { + return false + } + + for k, v := range i.intervals { + if v.Start != other.intervals[k].Start || v.Stop != other.intervals[k].Stop { + return false + } + } + + return true +} + +func (i *IntervalSet) first() int { + if len(i.intervals) == 0 { + return TokenInvalidType + } + + return i.intervals[0].Start +} + +func (i *IntervalSet) addOne(v int) { + i.addInterval(NewInterval(v, v+1)) +} + +func (i *IntervalSet) addRange(l, h int) { + i.addInterval(NewInterval(l, h+1)) +} + +func (i *IntervalSet) addInterval(v Interval) { + if i.intervals == nil { + i.intervals = make([]Interval, 0) + i.intervals = append(i.intervals, v) + } else { + // find insert pos + for k, interval := range i.intervals { + // distinct range -> insert + if v.Stop < interval.Start { + i.intervals = append(i.intervals[0:k], append([]Interval{v}, i.intervals[k:]...)...) + return + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start + return + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) + + // if not applying to end, merge potential overlaps + if k < len(i.intervals)-1 { + l := i.intervals[k] + r := i.intervals[k+1] + // if r contained in l + if l.Stop >= r.Stop { + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) 
+ } + } + return + } + } + // greater than any exiting + i.intervals = append(i.intervals, v) + } +} + +func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { + if other.intervals != nil { + for k := 0; k < len(other.intervals); k++ { + i2 := other.intervals[k] + i.addInterval(NewInterval(i2.Start, i2.Stop)) + } + } + return i +} + +func (i *IntervalSet) complement(start int, stop int) *IntervalSet { + result := NewIntervalSet() + result.addInterval(NewInterval(start, stop+1)) + for j := 0; j < len(i.intervals); j++ { + result.removeRange(i.intervals[j]) + } + return result +} + +func (i *IntervalSet) contains(item int) bool { + if i.intervals == nil { + return false + } + for k := 0; k < len(i.intervals); k++ { + if i.intervals[k].Contains(item) { + return true + } + } + return false +} + +func (i *IntervalSet) length() int { + iLen := 0 + + for _, v := range i.intervals { + iLen += v.Length() + } + + return iLen +} + +func (i *IntervalSet) removeRange(v Interval) { + if v.Start == v.Stop-1 { + i.removeOne(v.Start) + } else if i.intervals != nil { + k := 0 + for n := 0; n < len(i.intervals); n++ { + ni := i.intervals[k] + // intervals are ordered + if v.Stop <= ni.Start { + return + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) + return + } else if v.Start <= ni.Start && v.Stop >= ni.Stop { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) + k = k - 1 // need another pass + } else if v.Start < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + } else if v.Stop < ni.Stop { + i.intervals[k] = NewInterval(v.Stop, ni.Stop) + } + k++ + } + } +} + +func (i *IntervalSet) removeOne(v int) { + if i.intervals != nil { + for k := 0; k < len(i.intervals); k++ { + ki := i.intervals[k] + // intervals i ordered + if v < ki.Start { + return + } else if v == ki.Start && v == ki.Stop-1 { + // i.intervals.splice(k, 1) + i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...) + return + } else if v == ki.Start { + i.intervals[k] = NewInterval(ki.Start+1, ki.Stop) + return + } else if v == ki.Stop-1 { + i.intervals[k] = NewInterval(ki.Start, ki.Stop-1) + return + } else if v < ki.Stop-1 { + x := NewInterval(ki.Start, v) + ki.Start = v + 1 + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]Interval{x}, i.intervals[k:]...)...) 
+				return
+			}
+		}
+	}
+}
+
+func (i *IntervalSet) String() string {
+	return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+	if i.intervals == nil {
+		return "{}"
+	} else if literalNames != nil || symbolicNames != nil {
+		return i.toTokenString(literalNames, symbolicNames)
+	} else if elemsAreChar {
+		return i.toCharString()
+	}
+
+	return i.toIndexString()
+}
+
+func (i *IntervalSet) GetIntervals() []Interval {
+	return i.intervals
+}
+
+func (i *IntervalSet) toCharString() string {
+	names := make([]string, 0, len(i.intervals)) // zero length; entries are appended below
+
+	var sb strings.Builder
+
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				sb.WriteByte('\'')
+				sb.WriteRune(rune(v.Start))
+				sb.WriteByte('\'')
+				names = append(names, sb.String())
+				sb.Reset()
+			}
+		} else {
+			sb.WriteByte('\'')
+			sb.WriteRune(rune(v.Start))
+			sb.WriteString("'..'")
+			sb.WriteRune(rune(v.Stop - 1))
+			sb.WriteByte('\'')
+			names = append(names, sb.String())
+			sb.Reset()
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+	names := make([]string, 0)
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				names = append(names, strconv.Itoa(v.Start))
+			}
+		} else {
+			names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+	names := make([]string, 0)
+	for _, v := range i.intervals {
+		for j := v.Start; j < v.Stop; j++ {
+			names = append(names, i.elementName(literalNames, symbolicNames, j))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+	if a == TokenEOF {
+		return "<EOF>"
+	} else if a == TokenEpsilon {
+		return "<EPSILON>"
+	} else {
+		if a < len(literalNames) && literalNames[a] != "" {
+			return literalNames[a]
+		}
+
+		return symbolicNames[a]
+	}
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/jcollect.go b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
new file mode 100644
index 00000000..6d668f79
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/jcollect.go
@@ -0,0 +1,684 @@
+package antlr
+
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+import (
+	"container/list"
+	"runtime/debug"
+	"sort"
+)
+
+// Collectable is an interface that a struct should implement if it is to be
+// usable as a key in these collections.
+type Collectable[T any] interface { + Hash() int + Equals(other Collectable[T]) bool +} + +type Comparator[T any] interface { + Hash1(o T) int + Equals2(T, T) bool +} + +type CollectionSource int +type CollectionDescriptor struct { + SybolicName string + Description string +} + +const ( + UnknownCollection CollectionSource = iota + ATNConfigLookupCollection + ATNStateCollection + DFAStateCollection + ATNConfigCollection + PredictionContextCollection + SemanticContextCollection + ClosureBusyCollection + PredictionVisitedCollection + MergeCacheCollection + PredictionContextCacheCollection + AltSetCollection + ReachSetCollection +) + +var CollectionDescriptors = map[CollectionSource]CollectionDescriptor{ + UnknownCollection: { + SybolicName: "UnknownCollection", + Description: "Unknown collection type. Only used if the target author thought it was an unimportant collection.", + }, + ATNConfigCollection: { + SybolicName: "ATNConfigCollection", + Description: "ATNConfig collection. Used to store the ATNConfigs for a particular state in the ATN." + + "For instance, it is used to store the results of the closure() operation in the ATN.", + }, + ATNConfigLookupCollection: { + SybolicName: "ATNConfigLookupCollection", + Description: "ATNConfigLookup collection. Used to store the ATNConfigs for a particular state in the ATN." + + "This is used to prevent duplicating equivalent states in an ATNConfigurationSet.", + }, + ATNStateCollection: { + SybolicName: "ATNStateCollection", + Description: "ATNState collection. This is used to store the states of the ATN.", + }, + DFAStateCollection: { + SybolicName: "DFAStateCollection", + Description: "DFAState collection. This is used to store the states of the DFA.", + }, + PredictionContextCollection: { + SybolicName: "PredictionContextCollection", + Description: "PredictionContext collection. This is used to store the prediction contexts of the ATN and cache computes.", + }, + SemanticContextCollection: { + SybolicName: "SemanticContextCollection", + Description: "SemanticContext collection. This is used to store the semantic contexts of the ATN.", + }, + ClosureBusyCollection: { + SybolicName: "ClosureBusyCollection", + Description: "ClosureBusy collection. This is used to check and prevent infinite recursion right recursive rules." + + "It stores ATNConfigs that are currently being processed in the closure() operation.", + }, + PredictionVisitedCollection: { + SybolicName: "PredictionVisitedCollection", + Description: "A map that records whether we have visited a particular context when searching through cached entries.", + }, + MergeCacheCollection: { + SybolicName: "MergeCacheCollection", + Description: "A map that records whether we have already merged two particular contexts and can save effort by not repeating it.", + }, + PredictionContextCacheCollection: { + SybolicName: "PredictionContextCacheCollection", + Description: "A map that records whether we have already created a particular context and can save effort by not computing it again.", + }, + AltSetCollection: { + SybolicName: "AltSetCollection", + Description: "Used to eliminate duplicate alternatives in an ATN config set.", + }, + ReachSetCollection: { + SybolicName: "ReachSetCollection", + Description: "Used as merge cache to prevent us needing to compute the merge of two states if we have already done it.", + }, +} + +// JStore implements a container that allows the use of a struct to calculate the key +// for a collection of values akin to map. 
This is not meant to be a full-blown HashMap but just +// serve the needs of the ANTLR Go runtime. +// +// For ease of porting the logic of the runtime from the master target (Java), this collection +// operates in a similar way to Java, in that it can use any struct that supplies a Hash() and Equals() +// function as the key. The values are stored in a standard go map which internally is a form of hashmap +// itself, the key for the go map is the hash supplied by the key object. The collection is able to deal with +// hash conflicts by using a simple slice of values associated with the hash code indexed bucket. That isn't +// particularly efficient, but it is simple, and it works. As this is specifically for the ANTLR runtime, and +// we understand the requirements, then this is fine - this is not a general purpose collection. +type JStore[T any, C Comparator[T]] struct { + store map[int][]T + len int + comparator Comparator[T] + stats *JStatRec +} + +func NewJStore[T any, C Comparator[T]](comparator Comparator[T], cType CollectionSource, desc string) *JStore[T, C] { + + if comparator == nil { + panic("comparator cannot be nil") + } + + s := &JStore[T, C]{ + store: make(map[int][]T, 1), + comparator: comparator, + } + if collectStats { + s.stats = &JStatRec{ + Source: cType, + Description: desc, + } + + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + s.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(s.stats) + } + return s +} + +// Put will store given value in the collection. Note that the key for storage is generated from +// the value itself - this is specifically because that is what ANTLR needs - this would not be useful +// as any kind of general collection. +// +// If the key has a hash conflict, then the value will be added to the slice of values associated with the +// hash, unless the value is already in the slice, in which case the existing value is returned. Value equivalence is +// tested by calling the equals() method on the key. +// +// # If the given value is already present in the store, then the existing value is returned as v and exists is set to true +// +// If the given value is not present in the store, then the value is added to the store and returned as v and exists is set to false. +func (s *JStore[T, C]) Put(value T) (v T, exists bool) { + + if collectStats { + s.stats.Puts++ + } + kh := s.comparator.Hash1(value) + + var hClash bool + for _, v1 := range s.store[kh] { + hClash = true + if s.comparator.Equals2(value, v1) { + if collectStats { + s.stats.PutHits++ + s.stats.PutHashConflicts++ + } + return v1, true + } + if collectStats { + s.stats.PutMisses++ + } + } + if collectStats && hClash { + s.stats.PutHashConflicts++ + } + s.store[kh] = append(s.store[kh], value) + + if collectStats { + if len(s.store[kh]) > s.stats.MaxSlotSize { + s.stats.MaxSlotSize = len(s.store[kh]) + } + } + s.len++ + if collectStats { + s.stats.CurSize = s.len + if s.len > s.stats.MaxSize { + s.stats.MaxSize = s.len + } + } + return value, false +} + +// Get will return the value associated with the key - the type of the key is the same type as the value +// which would not generally be useful, but this is a specific thing for ANTLR where the key is +// generated using the object we are going to store. 
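A usage sketch for JStore with a caller-supplied Comparator, per the description above. The `point` type and comparator are ours; `JStore`, `NewJStore`, and `UnknownCollection` are the exported API shown in this file:

```go
package demo

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"
)

// point is a toy key type; pointEq supplies the Hash1/Equals2 pair that the
// Comparator interface above requires.
type point struct{ x, y int }

type pointEq struct{}

func (pointEq) Hash1(p point) int       { return p.x*31 + p.y }
func (pointEq) Equals2(a, b point) bool { return a == b }

func jstoreDemo() {
	s := antlr.NewJStore[point, pointEq](pointEq{}, antlr.UnknownCollection, "demo store")
	_, existed := s.Put(point{1, 2}) // existed == false: first insertion
	v, ok := s.Get(point{1, 2})      // ok == true: same hash and Equals2
	fmt.Println(existed, ok, v)
}
```

Hash collisions are tolerated: values sharing a `Hash1` bucket are scanned linearly with `Equals2`, which is the simple slice-per-bucket scheme the comment above describes.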
+func (s *JStore[T, C]) Get(key T) (T, bool) { + if collectStats { + s.stats.Gets++ + } + kh := s.comparator.Hash1(key) + var hClash bool + for _, v := range s.store[kh] { + hClash = true + if s.comparator.Equals2(key, v) { + if collectStats { + s.stats.GetHits++ + s.stats.GetHashConflicts++ + } + return v, true + } + if collectStats { + s.stats.GetMisses++ + } + } + if collectStats { + if hClash { + s.stats.GetHashConflicts++ + } + s.stats.GetNoEnt++ + } + return key, false +} + +// Contains returns true if the given key is present in the store +func (s *JStore[T, C]) Contains(key T) bool { + _, present := s.Get(key) + return present +} + +func (s *JStore[T, C]) SortedSlice(less func(i, j T) bool) []T { + vs := make([]T, 0, len(s.store)) + for _, v := range s.store { + vs = append(vs, v...) + } + sort.Slice(vs, func(i, j int) bool { + return less(vs[i], vs[j]) + }) + + return vs +} + +func (s *JStore[T, C]) Each(f func(T) bool) { + for _, e := range s.store { + for _, v := range e { + f(v) + } + } +} + +func (s *JStore[T, C]) Len() int { + return s.len +} + +func (s *JStore[T, C]) Values() []T { + vs := make([]T, 0, len(s.store)) + for _, e := range s.store { + vs = append(vs, e...) + } + return vs +} + +type entry[K, V any] struct { + key K + val V +} + +type JMap[K, V any, C Comparator[K]] struct { + store map[int][]*entry[K, V] + len int + comparator Comparator[K] + stats *JStatRec +} + +func NewJMap[K, V any, C Comparator[K]](comparator Comparator[K], cType CollectionSource, desc string) *JMap[K, V, C] { + m := &JMap[K, V, C]{ + store: make(map[int][]*entry[K, V], 1), + comparator: comparator, + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (m *JMap[K, V, C]) Put(key K, val V) (V, bool) { + if collectStats { + m.stats.Puts++ + } + kh := m.comparator.Hash1(key) + + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.PutHits++ + m.stats.PutHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.PutMisses++ + } + } + if collectStats { + if hClash { + m.stats.PutHashConflicts++ + } + } + m.store[kh] = append(m.store[kh], &entry[K, V]{key, val}) + if collectStats { + if len(m.store[kh]) > m.stats.MaxSlotSize { + m.stats.MaxSlotSize = len(m.store[kh]) + } + } + m.len++ + if collectStats { + m.stats.CurSize = m.len + if m.len > m.stats.MaxSize { + m.stats.MaxSize = m.len + } + } + return val, false +} + +func (m *JMap[K, V, C]) Values() []V { + vs := make([]V, 0, len(m.store)) + for _, e := range m.store { + for _, v := range e { + vs = append(vs, v.val) + } + } + return vs +} + +func (m *JMap[K, V, C]) Get(key K) (V, bool) { + if collectStats { + m.stats.Gets++ + } + var none V + kh := m.comparator.Hash1(key) + var hClash bool + for _, e := range m.store[kh] { + hClash = true + if m.comparator.Equals2(e.key, key) { + if collectStats { + m.stats.GetHits++ + m.stats.GetHashConflicts++ + } + return e.val, true + } + if collectStats { + m.stats.GetMisses++ + } + } + if collectStats { + if hClash { + m.stats.GetHashConflicts++ + } + m.stats.GetNoEnt++ + } + return none, false +} + +func (m *JMap[K, V, C]) Len() int { + return m.len +} + +func (m *JMap[K, V, C]) Delete(key K) { + kh := m.comparator.Hash1(key) + for i, e := range m.store[kh] { + if 
m.comparator.Equals2(e.key, key) { + m.store[kh] = append(m.store[kh][:i], m.store[kh][i+1:]...) + m.len-- + return + } + } +} + +func (m *JMap[K, V, C]) Clear() { + m.store = make(map[int][]*entry[K, V]) +} + +type JPCMap struct { + store *JMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]] + size int + stats *JStatRec +} + +func NewJPCMap(cType CollectionSource, desc string) *JPCMap { + m := &JPCMap{ + store: NewJMap[*PredictionContext, *JMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]], *ObjEqComparator[*PredictionContext]](pContextEqInst, cType, desc), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func (pcm *JPCMap) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + // Do we have a map stored by k1? + // + m2, present := pcm.store.Get(k1) + if present { + if collectStats { + pcm.stats.GetHits++ + } + // We found a map of values corresponding to k1, so now we need to look up k2 in that map + // + return m2.Get(k2) + } + if collectStats { + pcm.stats.GetMisses++ + } + return nil, false +} + +func (pcm *JPCMap) Put(k1, k2, v *PredictionContext) { + + if collectStats { + pcm.stats.Puts++ + } + // First does a map already exist for k1? + // + if m2, present := pcm.store.Get(k1); present { + if collectStats { + pcm.stats.PutHits++ + } + _, present = m2.Put(k2, v) + if !present { + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + } + } else { + // No map found for k1, so we create it, add in our value, then store is + // + if collectStats { + pcm.stats.PutMisses++ + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, pcm.stats.Source, pcm.stats.Description+" map entry") + } else { + m2 = NewJMap[*PredictionContext, *PredictionContext, *ObjEqComparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "map entry") + } + + m2.Put(k2, v) + pcm.store.Put(k1, m2) + pcm.size++ + } +} + +type JPCMap2 struct { + store map[int][]JPCEntry + size int + stats *JStatRec +} + +type JPCEntry struct { + k1, k2, v *PredictionContext +} + +func NewJPCMap2(cType CollectionSource, desc string) *JPCMap2 { + m := &JPCMap2{ + store: make(map[int][]JPCEntry, 1000), + } + if collectStats { + m.stats = &JStatRec{ + Source: cType, + Description: desc, + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + m.stats.CreateStack = debug.Stack() + } + Statistics.AddJStatRec(m.stats) + } + return m +} + +func dHash(k1, k2 *PredictionContext) int { + return k1.cachedHash*31 + k2.cachedHash +} + +func (pcm *JPCMap2) Get(k1, k2 *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Gets++ + } + + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.GetHits++ + pcm.stats.GetHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.GetMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.GetHashConflicts++ + } + pcm.stats.GetNoEnt++ + 
} + return nil, false +} + +func (pcm *JPCMap2) Put(k1, k2, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + pcm.stats.Puts++ + } + h := dHash(k1, k2) + var hClash bool + for _, e := range pcm.store[h] { + hClash = true + if e.k1.Equals(k1) && e.k2.Equals(k2) { + if collectStats { + pcm.stats.PutHits++ + pcm.stats.PutHashConflicts++ + } + return e.v, true + } + if collectStats { + pcm.stats.PutMisses++ + } + } + if collectStats { + if hClash { + pcm.stats.PutHashConflicts++ + } + } + pcm.store[h] = append(pcm.store[h], JPCEntry{k1, k2, v}) + pcm.size++ + if collectStats { + pcm.stats.CurSize = pcm.size + if pcm.size > pcm.stats.MaxSize { + pcm.stats.MaxSize = pcm.size + } + } + return nil, false +} + +type VisitEntry struct { + k *PredictionContext + v *PredictionContext +} +type VisitRecord struct { + store map[*PredictionContext]*PredictionContext + len int + stats *JStatRec +} + +type VisitList struct { + cache *list.List + lock RWMutex +} + +var visitListPool = VisitList{ + cache: list.New(), + lock: RWMutex{}, +} + +// NewVisitRecord returns a new VisitRecord instance from the pool if available. +// Note that this "map" uses a pointer as a key because we are emulating the behavior of +// IdentityHashMap in Java, which uses the `==` operator to compare whether the keys are equal, +// which means is the key the same reference to an object rather than is it .equals() to another +// object. +func NewVisitRecord() *VisitRecord { + visitListPool.lock.Lock() + el := visitListPool.cache.Front() + defer visitListPool.lock.Unlock() + var vr *VisitRecord + if el == nil { + vr = &VisitRecord{ + store: make(map[*PredictionContext]*PredictionContext), + } + if collectStats { + vr.stats = &JStatRec{ + Source: PredictionContextCacheCollection, + Description: "VisitRecord", + } + // Track where we created it from if we are being asked to do so + if runtimeConfig.statsTraceStacks { + vr.stats.CreateStack = debug.Stack() + } + } + } else { + vr = el.Value.(*VisitRecord) + visitListPool.cache.Remove(el) + vr.store = make(map[*PredictionContext]*PredictionContext) + } + if collectStats { + Statistics.AddJStatRec(vr.stats) + } + return vr +} + +func (vr *VisitRecord) Release() { + vr.len = 0 + vr.store = nil + if collectStats { + vr.stats.MaxSize = 0 + vr.stats.CurSize = 0 + vr.stats.Gets = 0 + vr.stats.GetHits = 0 + vr.stats.GetMisses = 0 + vr.stats.GetHashConflicts = 0 + vr.stats.GetNoEnt = 0 + vr.stats.Puts = 0 + vr.stats.PutHits = 0 + vr.stats.PutMisses = 0 + vr.stats.PutHashConflicts = 0 + vr.stats.MaxSlotSize = 0 + } + visitListPool.lock.Lock() + visitListPool.cache.PushBack(vr) + visitListPool.lock.Unlock() +} + +func (vr *VisitRecord) Get(k *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Gets++ + } + v := vr.store[k] + if v != nil { + if collectStats { + vr.stats.GetHits++ + } + return v, true + } + if collectStats { + vr.stats.GetNoEnt++ + } + return nil, false +} + +func (vr *VisitRecord) Put(k, v *PredictionContext) (*PredictionContext, bool) { + if collectStats { + vr.stats.Puts++ + } + vr.store[k] = v + vr.len++ + if collectStats { + vr.stats.CurSize = vr.len + if vr.len > vr.stats.MaxSize { + vr.stats.MaxSize = vr.len + } + } + return v, false +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer.go b/vendor/github.com/antlr4-go/antlr/v4/lexer.go new file mode 100644 index 00000000..e5594b21 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer.go @@ -0,0 +1,426 @@ +// Copyright (c) 2012-2022 The ANTLR Project. 
All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A lexer is recognizer that draws input symbols from a character stream. +// lexer grammars result in a subclass of this object. A Lexer object +// uses simplified Match() and error recovery mechanisms in the interest +// of speed. +/// + +type Lexer interface { + TokenSource + Recognizer + + Emit() Token + + SetChannel(int) + PushMode(int) + PopMode() int + SetType(int) + SetMode(int) +} + +type BaseLexer struct { + *BaseRecognizer + + Interpreter ILexerATNSimulator + TokenStartCharIndex int + TokenStartLine int + TokenStartColumn int + ActionType int + Virt Lexer // The most derived lexer implementation. Allows virtual method calls. + + input CharStream + factory TokenFactory + tokenFactorySourcePair *TokenSourceCharStreamPair + token Token + hitEOF bool + channel int + thetype int + modeStack IntStack + mode int + text string +} + +func NewBaseLexer(input CharStream) *BaseLexer { + + lexer := new(BaseLexer) + + lexer.BaseRecognizer = NewBaseRecognizer() + + lexer.input = input + lexer.factory = CommonTokenFactoryDEFAULT + lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} + + lexer.Virt = lexer + + lexer.Interpreter = nil // child classes must populate it + + // The goal of all lexer rules/methods is to create a token object. + // l is an instance variable as multiple rules may collaborate to + // create a single token. NextToken will return l object after + // Matching lexer rule(s). If you subclass to allow multiple token + // emissions, then set l to the last token to be Matched or + // something non nil so that the auto token emit mechanism will not + // emit another token. + lexer.token = nil + + // What character index in the stream did the current token start at? + // Needed, for example, to get the text for current token. Set at + // the start of NextToken. + lexer.TokenStartCharIndex = -1 + + // The line on which the first character of the token resides/// + lexer.TokenStartLine = -1 + + // The character position of first character within the line/// + lexer.TokenStartColumn = -1 + + // Once we see EOF on char stream, next token will be EOF. + // If you have DONE : EOF then you see DONE EOF. + lexer.hitEOF = false + + // The channel number for the current token/// + lexer.channel = TokenDefaultChannel + + // The token type for the current token/// + lexer.thetype = TokenInvalidType + + lexer.modeStack = make([]int, 0) + lexer.mode = LexerDefaultMode + + // You can set the text for the current token to override what is in + // the input char buffer. Use setText() or can set l instance var. 
+ // / + lexer.text = "" + + return lexer +} + +const ( + LexerDefaultMode = 0 + LexerMore = -2 + LexerSkip = -3 +) + +//goland:noinspection GoUnusedConst +const ( + LexerDefaultTokenChannel = TokenDefaultChannel + LexerHidden = TokenHiddenChannel + LexerMinCharValue = 0x0000 + LexerMaxCharValue = 0x10FFFF +) + +func (b *BaseLexer) Reset() { + // wack Lexer state variables + if b.input != nil { + b.input.Seek(0) // rewind the input + } + b.token = nil + b.thetype = TokenInvalidType + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = -1 + b.TokenStartColumn = -1 + b.TokenStartLine = -1 + b.text = "" + + b.hitEOF = false + b.mode = LexerDefaultMode + b.modeStack = make([]int, 0) + + b.Interpreter.reset() +} + +func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { + return b.Interpreter +} + +func (b *BaseLexer) GetInputStream() CharStream { + return b.input +} + +func (b *BaseLexer) GetSourceName() string { + return b.GrammarFileName +} + +func (b *BaseLexer) SetChannel(v int) { + b.channel = v +} + +func (b *BaseLexer) GetTokenFactory() TokenFactory { + return b.factory +} + +func (b *BaseLexer) setTokenFactory(f TokenFactory) { + b.factory = f +} + +func (b *BaseLexer) safeMatch() (ret int) { + defer func() { + if e := recover(); e != nil { + if re, ok := e.(RecognitionException); ok { + b.notifyListeners(re) // Report error + b.Recover(re) + ret = LexerSkip // default + } + } + }() + + return b.Interpreter.Match(b.input, b.mode) +} + +// NextToken returns a token from the lexer input source i.e., Match a token on the source char stream. +func (b *BaseLexer) NextToken() Token { + if b.input == nil { + panic("NextToken requires a non-nil input stream.") + } + + tokenStartMarker := b.input.Mark() + + // previously in finally block + defer func() { + // make sure we release marker after Match or + // unbuffered char stream will keep buffering + b.input.Release(tokenStartMarker) + }() + + for { + if b.hitEOF { + b.EmitEOF() + return b.token + } + b.token = nil + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = b.input.Index() + b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() + b.TokenStartLine = b.Interpreter.GetLine() + b.text = "" + continueOuter := false + for { + b.thetype = TokenInvalidType + + ttype := b.safeMatch() // Defaults to LexerSkip + + if b.input.LA(1) == TokenEOF { + b.hitEOF = true + } + if b.thetype == TokenInvalidType { + b.thetype = ttype + } + if b.thetype == LexerSkip { + continueOuter = true + break + } + if b.thetype != LexerMore { + break + } + } + + if continueOuter { + continue + } + if b.token == nil { + b.Virt.Emit() + } + return b.token + } +} + +// Skip instructs the lexer to Skip creating a token for current lexer rule +// and look for another token. [NextToken] knows to keep looking when +// a lexer rule finishes with token set to [SKIPTOKEN]. Recall that +// if token==nil at end of any token rule, it creates one for you +// and emits it. +func (b *BaseLexer) Skip() { + b.thetype = LexerSkip +} + +func (b *BaseLexer) More() { + b.thetype = LexerMore +} + +// SetMode changes the lexer to a new mode. The lexer will use this mode from hereon in and the rules for that mode +// will be in force. +func (b *BaseLexer) SetMode(m int) { + b.mode = m +} + +// PushMode saves the current lexer mode so that it can be restored later. See [PopMode], then sets the +// current lexer mode to the supplied mode m. 
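+//
+// A minimal usage sketch; the mode index below is a hypothetical value, not part of this API:
+//
+//	const strMode = 1   // assumed: a mode index generated from a lexer grammar
+//	b.PushMode(strMode) // save the current mode and switch to strMode
+//	// ... match tokens while in strMode ...
+//	b.PopMode()         // restore the mode that was active before PushMode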
+func (b *BaseLexer) PushMode(m int) { + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("pushMode " + strconv.Itoa(m)) + } + b.modeStack.Push(b.mode) + b.mode = m +} + +// PopMode restores the lexer mode saved by a call to [PushMode]. It is a panic error if there is no saved mode to +// return to. +func (b *BaseLexer) PopMode() int { + if len(b.modeStack) == 0 { + panic("Empty Stack") + } + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) + } + i, _ := b.modeStack.Pop() + b.mode = i + return b.mode +} + +func (b *BaseLexer) inputStream() CharStream { + return b.input +} + +// SetInputStream resets the lexer input stream and associated lexer state. +func (b *BaseLexer) SetInputStream(input CharStream) { + b.input = nil + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} + b.Reset() + b.input = input + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} +} + +func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { + return b.tokenFactorySourcePair +} + +// EmitToken by default does not support multiple emits per [NextToken] invocation +// for efficiency reasons. Subclass and override this func, [NextToken], +// and [GetToken] (to push tokens into a list and pull from that list +// rather than a single variable as this implementation does). +func (b *BaseLexer) EmitToken(token Token) { + b.token = token +} + +// Emit is the standard method called to automatically emit a token at the +// outermost lexical rule. The token object should point into the +// char buffer start..stop. If there is a text override in 'text', +// use that to set the token's text. Override this method to emit +// custom [Token] objects or provide a new factory. +// / +func (b *BaseLexer) Emit() Token { + t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) + b.EmitToken(t) + return t +} + +// EmitEOF emits an EOF token. By default, this is the last token emitted +func (b *BaseLexer) EmitEOF() Token { + cpos := b.GetCharPositionInLine() + lpos := b.GetLine() + eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) + b.EmitToken(eof) + return eof +} + +// GetCharPositionInLine returns the current position in the current line as far as the lexer is concerned. +func (b *BaseLexer) GetCharPositionInLine() int { + return b.Interpreter.GetCharPositionInLine() +} + +func (b *BaseLexer) GetLine() int { + return b.Interpreter.GetLine() +} + +func (b *BaseLexer) GetType() int { + return b.thetype +} + +func (b *BaseLexer) SetType(t int) { + b.thetype = t +} + +// GetCharIndex returns the index of the current character of lookahead +func (b *BaseLexer) GetCharIndex() int { + return b.input.Index() +} + +// GetText returns the text Matched so far for the current token or any text override. +func (b *BaseLexer) GetText() string { + if b.text != "" { + return b.text + } + + return b.Interpreter.GetText(b.input) +} + +// SetText sets the complete text of this token; it wipes any previous changes to the text. +func (b *BaseLexer) SetText(text string) { + b.text = text +} + +// GetATN returns the ATN used by the lexer. +func (b *BaseLexer) GetATN() *ATN { + return b.Interpreter.ATN() +} + +// GetAllTokens returns a list of all [Token] objects in input char stream. 
+// Forces a load of all tokens that can be made from the input char stream. +// +// Does not include EOF token. +func (b *BaseLexer) GetAllTokens() []Token { + vl := b.Virt + tokens := make([]Token, 0) + t := vl.NextToken() + for t.GetTokenType() != TokenEOF { + tokens = append(tokens, t) + t = vl.NextToken() + } + return tokens +} + +func (b *BaseLexer) notifyListeners(e RecognitionException) { + start := b.TokenStartCharIndex + stop := b.input.Index() + text := b.input.GetTextFromInterval(NewInterval(start, stop)) + msg := "token recognition error at: '" + text + "'" + listener := b.GetErrorListenerDispatch() + listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e) +} + +func (b *BaseLexer) getErrorDisplayForChar(c rune) string { + if c == TokenEOF { + return "" + } else if c == '\n' { + return "\\n" + } else if c == '\t' { + return "\\t" + } else if c == '\r' { + return "\\r" + } else { + return string(c) + } +} + +func (b *BaseLexer) getCharErrorDisplay(c rune) string { + return "'" + b.getErrorDisplayForChar(c) + "'" +} + +// Recover can normally Match any char in its vocabulary after Matching +// a token, so here we do the easy thing and just kill a character and hope +// it all works out. You can instead use the rule invocation stack +// to do sophisticated error recovery if you are in a fragment rule. +// +// In general, lexers should not need to recover and should have rules that cover any eventuality, such as +// a character that makes no sense to the recognizer. +func (b *BaseLexer) Recover(re RecognitionException) { + if b.input.LA(1) != TokenEOF { + if _, ok := re.(*LexerNoViableAltException); ok { + // Skip a char and try again + b.Interpreter.Consume(b.input) + } else { + // TODO: Do we lose character or line position information? + b.input.Consume() + } + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go new file mode 100644 index 00000000..eaa7393e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action.go @@ -0,0 +1,452 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "strconv" + +const ( + // LexerActionTypeChannel represents a [LexerChannelAction] action. + LexerActionTypeChannel = 0 + + // LexerActionTypeCustom represents a [LexerCustomAction] action. + LexerActionTypeCustom = 1 + + // LexerActionTypeMode represents a [LexerModeAction] action. + LexerActionTypeMode = 2 + + // LexerActionTypeMore represents a [LexerMoreAction] action. + LexerActionTypeMore = 3 + + // LexerActionTypePopMode represents a [LexerPopModeAction] action. + LexerActionTypePopMode = 4 + + // LexerActionTypePushMode represents a [LexerPushModeAction] action. + LexerActionTypePushMode = 5 + + // LexerActionTypeSkip represents a [LexerSkipAction] action. + LexerActionTypeSkip = 6 + + // LexerActionTypeType represents a [LexerTypeAction] action. 
+ LexerActionTypeType = 7 +) + +type LexerAction interface { + getActionType() int + getIsPositionDependent() bool + execute(lexer Lexer) + Hash() int + Equals(other LexerAction) bool +} + +type BaseLexerAction struct { + actionType int + isPositionDependent bool +} + +func NewBaseLexerAction(action int) *BaseLexerAction { + la := new(BaseLexerAction) + + la.actionType = action + la.isPositionDependent = false + + return la +} + +func (b *BaseLexerAction) execute(_ Lexer) { + panic("Not implemented") +} + +func (b *BaseLexerAction) getActionType() int { + return b.actionType +} + +func (b *BaseLexerAction) getIsPositionDependent() bool { + return b.isPositionDependent +} + +func (b *BaseLexerAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, b.actionType) + return murmurFinish(h, 1) +} + +func (b *BaseLexerAction) Equals(other LexerAction) bool { + return b.actionType == other.getActionType() +} + +// LexerSkipAction implements the [BaseLexerAction.Skip] lexer action by calling [Lexer.Skip]. +// +// The Skip command does not have any parameters, so this action is +// implemented as a singleton instance exposed by the [LexerSkipActionINSTANCE]. +type LexerSkipAction struct { + *BaseLexerAction +} + +func NewLexerSkipAction() *LexerSkipAction { + la := new(LexerSkipAction) + la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) + return la +} + +// LexerSkipActionINSTANCE provides a singleton instance of this parameterless lexer action. +var LexerSkipActionINSTANCE = NewLexerSkipAction() + +func (l *LexerSkipAction) execute(lexer Lexer) { + lexer.Skip() +} + +// String returns a string representation of the current [LexerSkipAction]. +func (l *LexerSkipAction) String() string { + return "skip" +} + +func (b *LexerSkipAction) Equals(other LexerAction) bool { + return other.getActionType() == LexerActionTypeSkip +} + +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// +// with the assigned type. +type LexerTypeAction struct { + *BaseLexerAction + + thetype int +} + +func NewLexerTypeAction(thetype int) *LexerTypeAction { + l := new(LexerTypeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) + l.thetype = thetype + return l +} + +func (l *LexerTypeAction) execute(lexer Lexer) { + lexer.SetType(l.thetype) +} + +func (l *LexerTypeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.thetype) + return murmurFinish(h, 2) +} + +func (l *LexerTypeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerTypeAction); !ok { + return false + } else { + return l.thetype == other.(*LexerTypeAction).thetype + } +} + +func (l *LexerTypeAction) String() string { + return "actionType(" + strconv.Itoa(l.thetype) + ")" +} + +// LexerPushModeAction implements the pushMode lexer action by calling +// [Lexer.pushMode] with the assigned mode. +type LexerPushModeAction struct { + *BaseLexerAction + mode int +} + +func NewLexerPushModeAction(mode int) *LexerPushModeAction { + + l := new(LexerPushModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) + + l.mode = mode + return l +} + +//
This action is implemented by calling {@link Lexer//pushMode} with the +// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) { + lexer.PushMode(l.mode) +} + +func (l *LexerPushModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerPushModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerPushModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerPushModeAction).mode + } +} + +func (l *LexerPushModeAction) String() string { + return "pushMode(" + strconv.Itoa(l.mode) + ")" +} + +// LexerPopModeAction implements the popMode lexer action by calling [Lexer.popMode]. +// +// The popMode command does not have any parameters, so this action is +// implemented as a singleton instance exposed by [LexerPopModeActionINSTANCE] +type LexerPopModeAction struct { + *BaseLexerAction +} + +func NewLexerPopModeAction() *LexerPopModeAction { + + l := new(LexerPopModeAction) + + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) + + return l +} + +var LexerPopModeActionINSTANCE = NewLexerPopModeAction() + +//
This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) { + lexer.PopMode() +} + +func (l *LexerPopModeAction) String() string { + return "popMode" +} + +// Implements the {@code more} lexer action by calling {@link Lexer//more}. +// +//
The {@code more} command does not have any parameters, so this action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
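+//
+// In a lexer grammar the more command is typically written as a lexer command,
+// e.g. (hypothetical rule and mode names):
+//
+//	STR_OPEN : '"' -> more, mode(STR) ;  // keep the matched text and continue matching in mode STR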
+ +type LexerMoreAction struct { + *BaseLexerAction +} + +func NewLexerMoreAction() *LexerMoreAction { + l := new(LexerMoreAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) + + return l +} + +var LexerMoreActionINSTANCE = NewLexerMoreAction() + +//
This action is implemented by calling {@link Lexer//more}.
+func (l *LexerMoreAction) execute(lexer Lexer) { + lexer.More() +} + +func (l *LexerMoreAction) String() string { + return "more" +} + +// LexerModeAction implements the mode lexer action by calling [Lexer.mode] with +// the assigned mode. +type LexerModeAction struct { + *BaseLexerAction + mode int +} + +func NewLexerModeAction(mode int) *LexerModeAction { + l := new(LexerModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) + l.mode = mode + return l +} + +//
This action is implemented by calling {@link Lexer//mode} with the +// value provided by {@link //getMode}.
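+//
+// In a grammar this usually originates from a mode command, e.g. (an
+// illustrative island-grammar rule):
+//
+//	ISLAND_END : '}}' -> mode(DEFAULT_MODE) ;  // switch modes without stacking, unlike pushMode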
+func (l *LexerModeAction) execute(lexer Lexer) { + lexer.SetMode(l.mode) +} + +func (l *LexerModeAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerModeAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerModeAction).mode + } +} + +func (l *LexerModeAction) String() string { + return "mode(" + strconv.Itoa(l.mode) + ")" +} + +// Executes a custom lexer action by calling {@link Recognizer//action} with the +// rule and action indexes assigned to the custom action. The implementation of +// a custom action is added to the generated code for the lexer in an override +// of {@link Recognizer//action} when the grammar is compiled. +// +//
This class may represent embedded actions created with the {...} +// syntax in ANTLR 4, as well as actions created for lexer commands where the +// command argument could not be evaluated when the grammar was compiled.
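+//
+// For example, an embedded action in a grammar for the Go target might look
+// like this (an illustrative fragment, not part of this runtime):
+//
+//	ID : [a-zA-Z]+ { fmt.Println("matched an ID") } ;  // the {...} block becomes a custom action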
+ +// Constructs a custom lexer action with the specified rule and action +// indexes. +// +// @param ruleIndex The rule index to use for calls to +// {@link Recognizer//action}. +// @param actionIndex The action index to use for calls to +// {@link Recognizer//action}. + +type LexerCustomAction struct { + *BaseLexerAction + ruleIndex, actionIndex int +} + +func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { + l := new(LexerCustomAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) + l.ruleIndex = ruleIndex + l.actionIndex = actionIndex + l.isPositionDependent = true + return l +} + +//
Custom actions are implemented by calling {@link Lexer//action} with the +// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) { + lexer.Action(nil, l.ruleIndex, l.actionIndex) +} + +func (l *LexerCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.ruleIndex) + h = murmurUpdate(h, l.actionIndex) + return murmurFinish(h, 3) +} + +func (l *LexerCustomAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerCustomAction); !ok { + return false + } else { + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && + l.actionIndex == other.(*LexerCustomAction).actionIndex + } +} + +// LexerChannelAction implements the channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. +type LexerChannelAction struct { + *BaseLexerAction + channel int +} + +// NewLexerChannelAction creates a channel lexer action by calling +// [Lexer.setChannel] with the assigned channel. +// +// Constructs a new channel action with the specified channel value. +func NewLexerChannelAction(channel int) *LexerChannelAction { + l := new(LexerChannelAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) + l.channel = channel + return l +} + +//
This action is implemented by calling {@link Lexer//setChannel} with the +// value provided by {@link //getChannel}.
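+//
+// In a grammar this action usually comes from a channel command, for example:
+//
+//	WS : [ \t\r\n]+ -> channel(HIDDEN) ;  // route whitespace to the hidden channel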
+func (l *LexerChannelAction) execute(lexer Lexer) { + lexer.SetChannel(l.channel) +} + +func (l *LexerChannelAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.channel) + return murmurFinish(h, 2) +} + +func (l *LexerChannelAction) Equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerChannelAction); !ok { + return false + } else { + return l.channel == other.(*LexerChannelAction).channel + } +} + +func (l *LexerChannelAction) String() string { + return "channel(" + strconv.Itoa(l.channel) + ")" +} + +// This implementation of {@link LexerAction} is used for tracking input offsets +// for position-dependent actions within a {@link LexerActionExecutor}. +// +//
This action is not serialized as part of the ATN, and is only required for +// position-dependent lexer actions which appear at a location other than the +// end of a rule. For more information about DFA optimizations employed for +// lexer actions, see {@link LexerActionExecutor//append} and +// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
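+//
+// A rough sketch of the wrapping performed for mid-rule actions, using this
+// file's constructors (the offset and indexes are made-up values):
+//
+//	custom := NewLexerCustomAction(0, 0)              // a position-dependent action
+//	wrapped := NewLexerIndexedCustomAction(3, custom) // pin it at tokenStart+3
+//	_ = wrapped                                       // the executor seeks there before running it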
+ +type LexerIndexedCustomAction struct { + *BaseLexerAction + offset int + lexerAction LexerAction + isPositionDependent bool +} + +// NewLexerIndexedCustomAction constructs a new indexed custom action by associating a character offset +// with a [LexerAction]. +// +// Note: This class is only required for lexer actions for which +// [LexerAction.isPositionDependent] returns true. +// +// The offset points into the input [CharStream], relative to +// the token start index, at which the specified lexerAction should be +// executed. +func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { + + l := new(LexerIndexedCustomAction) + l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) + + l.offset = offset + l.lexerAction = lexerAction + l.isPositionDependent = true + + return l +} + +//
This method calls {@link //execute} on the result of {@link //getAction} +// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) { + // assume the input stream position was properly set by the calling code + l.lexerAction.execute(lexer) +} + +func (l *LexerIndexedCustomAction) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.offset) + h = murmurUpdate(h, l.lexerAction.Hash()) + return murmurFinish(h, 2) +} + +func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerIndexedCustomAction); !ok { + return false + } else { + return l.offset == other.(*LexerIndexedCustomAction).offset && + l.lexerAction.Equals(other.(*LexerIndexedCustomAction).lexerAction) + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go new file mode 100644 index 00000000..dfc28c32 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_action_executor.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "golang.org/x/exp/slices" + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//
The executor tracks position information for position-dependent lexer actions +// efficiently, ensuring that actions appearing only at the end of the rule do +// not cause bloating of the {@link DFA} created for the lexer.
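+//
+// A minimal composition sketch using identifiers defined in this runtime; the
+// particular sequence of actions shown is illustrative only:
+//
+//	var ex *LexerActionExecutor // a nil executor is a valid starting point
+//	ex = LexerActionExecutorappend(ex, LexerSkipActionINSTANCE)
+//	ex = LexerActionExecutorappend(ex, NewLexerChannelAction(TokenHiddenChannel))
+//	// ex.execute(lexer, input, startIndex) now runs both actions, in order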
+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link ATNConfig//hashCode} operation. + l.cachedHash = murmurInit(0) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.Hash()) + } + l.cachedHash = murmurFinish(l.cachedHash, len(lexerActions)) + + return l +} + +// LexerActionExecutorappend creates a [LexerActionExecutor] which executes the actions for +// the input [LexerActionExecutor] followed by a specified +// [LexerAction]. +// TODO: This does not match the Java code +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// fixOffsetBeforeMatch creates a [LexerActionExecutor] which encodes the current offset +// for position-dependent lexer actions. +// +// Normally, when the executor encounters lexer actions where +// [LexerAction.isPositionDependent] returns true, it calls +// [IntStream.Seek] on the input [CharStream] to set the input +// position to the end of the current token. This behavior provides +// for efficient [DFA] representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters. +// +// Prior to traversing a Match transition in the [ATN], the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the [DFA] representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same Length, regardless of their absolute +// position in the input stream. +// +// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns this instance. +// +// The offset is assigned to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// The func returns a [LexerActionExecutor] that stores input stream offsets +// for all position-dependent lexer actions. +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0, len(l.lexerActions)) + updatedLexerActions = append(updatedLexerActions, l.lexerActions...) + } + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//
This method calls {@link IntStream//seek} to set the position of the +// {@code input} {@link CharStream} prior to calling +// {@link LexerAction//execute} on a position-dependent action. Before the +// method returns, the input position will be restored to the same position +// it was in when the method was invoked.
+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) Hash() int { + if l == nil { + // TODO: Why is this here? l should not be nil + return 61 + } + + // TODO: This is created from the action itself when the struct is created - will this be an issue at some point? Java uses the runtime assign hashcode + return l.cachedHash +} + +func (l *LexerActionExecutor) Equals(other interface{}) bool { + if l == other { + return true + } + othert, ok := other.(*LexerActionExecutor) + if !ok { + return false + } + if othert == nil { + return false + } + if l.cachedHash != othert.cachedHash { + return false + } + if len(l.lexerActions) != len(othert.lexerActions) { + return false + } + return slices.EqualFunc(l.lexerActions, othert.lexerActions, func(i, j LexerAction) bool { + return i.Equals(j) + }) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go new file mode 100644 index 00000000..fe938b02 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/lexer_atn_simulator.go @@ -0,0 +1,677 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +//goland:noinspection GoUnusedGlobalVariable +var ( + LexerATNSimulatorMinDFAEdge = 0 + LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN + + LexerATNSimulatorMatchCalls = 0 +) + +type ILexerATNSimulator interface { + IATNSimulator + + reset() + Match(input CharStream, mode int) int + GetCharPositionInLine() int + GetLine() int + GetText(input CharStream) string + Consume(input CharStream) +} + +type LexerATNSimulator struct { + BaseATNSimulator + + recog Lexer + predictionMode int + mergeCache *JPCMap2 + startIndex int + Line int + CharPositionInLine int + mode int + prevAccept *SimState + MatchCalls int +} + +func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { + l := &LexerATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } + + l.decisionToDFA = decisionToDFA + l.recog = recog + + // The current token's starting index into the character stream. 
+ // Shared across DFA to ATN simulation in case the ATN fails and the + // DFA did not have a previous accept state. In l case, we use the + // ATN-generated exception object. + l.startIndex = -1 + + // line number 1..n within the input + l.Line = 1 + + // The index of the character relative to the beginning of the line + // 0..n-1 + l.CharPositionInLine = 0 + + l.mode = LexerDefaultMode + + // Used during DFA/ATN exec to record the most recent accept configuration + // info + l.prevAccept = NewSimState() + + return l +} + +func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { + l.CharPositionInLine = simulator.CharPositionInLine + l.Line = simulator.Line + l.mode = simulator.mode + l.startIndex = simulator.startIndex +} + +func (l *LexerATNSimulator) Match(input CharStream, mode int) int { + l.MatchCalls++ + l.mode = mode + mark := input.Mark() + + defer func() { + input.Release(mark) + }() + + l.startIndex = input.Index() + l.prevAccept.reset() + + dfa := l.decisionToDFA[mode] + + var s0 *DFAState + l.atn.stateMu.RLock() + s0 = dfa.getS0() + l.atn.stateMu.RUnlock() + + if s0 == nil { + return l.MatchATN(input) + } + + return l.execATN(input, s0) +} + +func (l *LexerATNSimulator) reset() { + l.prevAccept.reset() + l.startIndex = -1 + l.Line = 1 + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode +} + +func (l *LexerATNSimulator) MatchATN(input CharStream) int { + startState := l.atn.modeToStartState[l.mode] + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) + } + oldMode := l.mode + s0Closure := l.computeStartState(input, startState) + suppressEdge := s0Closure.hasSemanticContext + s0Closure.hasSemanticContext = false + + next := l.addDFAState(s0Closure, suppressEdge) + + predict := l.execATN(input, next) + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) + } + return predict +} + +func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("start state closure=" + ds0.configs.String()) + } + if ds0.isAcceptState { + // allow zero-Length tokens + l.captureSimState(l.prevAccept, input, ds0) + } + t := input.LA(1) + s := ds0 // s is current/from DFA state + + for { // while more work + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("execATN loop starting closure: " + s.configs.String()) + } + + // As we move src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=nil, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=nil, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set there's no point in re-computing it. + // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. 
+ // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. + target := l.getExistingTargetState(s, t) + if target == nil { + target = l.computeTargetState(input, s, t) + // print("Computed:" + str(target)) + } + if target == ATNSimulatorError { + break + } + // If l is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if t != TokenEOF { + l.Consume(input) + } + if target.isAcceptState { + l.captureSimState(l.prevAccept, input, target) + if t == TokenEOF { + break + } + } + t = input.LA(1) + s = target // flip current DFA target becomes new src/from state + } + + return l.failOrAccept(l.prevAccept, input, s.configs, t) +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// l method returns {@code nil}. +// +// @param s The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for l edge is not +// already cached +func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { + if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { + return nil + } + + l.atn.edgeMu.RLock() + defer l.atn.edgeMu.RUnlock() + if s.getEdges() == nil { + return nil + } + target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge) + if runtimeConfig.lexerATNSimulatorDebug && target != nil { + fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) + } + return target +} + +// computeTargetState computes a target state for an edge in the [DFA], and attempt to add the +// computed state and corresponding edge to the [DFA]. +// +// The func returns the computed target [DFA] state for the given input symbol t. +// If this does not lead to a valid [DFA] state, this method +// returns ATNSimulatorError. +func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { + reach := NewOrderedATNConfigSet() + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + l.getReachableConfigSet(input, s.configs, reach, t) + + if len(reach.configs) == 0 { // we got nowhere on t from s + if !reach.hasSemanticContext { + // we got nowhere on t, don't panic out l knowledge it'd + // cause a fail-over from DFA later. 
+ l.addDFAEdge(s, t, ATNSimulatorError, nil) + } + // stop when we can't Match any more char + return ATNSimulatorError + } + // Add an edge from s to target DFA found/created for reach + return l.addDFAEdge(s, t, nil, reach) +} + +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach *ATNConfigSet, t int) int { + if l.prevAccept.dfaState != nil { + lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor + l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + } + + // if no accept and EOF is first char, return EOF + if t == TokenEOF && input.Index() == l.startIndex { + return TokenEOF + } + + panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) +} + +// getReachableConfigSet when given a starting configuration set, figures out all [ATN] configurations +// we can reach upon input t. +// +// Parameter reach is a return parameter. +func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure *ATNConfigSet, reach *ATNConfigSet, t int) { + // l is used to Skip processing for configs which have a lower priority + // than a runtimeConfig that already reached an accept state for the same rule + SkipAlt := ATNInvalidAltNumber + + for _, cfg := range closure.configs { + currentAltReachedAcceptState := cfg.GetAlt() == SkipAlt + if currentAltReachedAcceptState && cfg.passedThroughNonGreedyDecision { + continue + } + + if runtimeConfig.lexerATNSimulatorDebug { + + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) + } + + for _, trans := range cfg.GetState().GetTransitions() { + target := l.getReachableTarget(trans, t) + if target != nil { + lexerActionExecutor := cfg.lexerActionExecutor + if lexerActionExecutor != nil { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) + } + treatEOFAsEpsilon := t == TokenEOF + config := NewLexerATNConfig3(cfg, target, lexerActionExecutor) + if l.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEOFAsEpsilon) { + // any remaining configs for l alt have a lower priority + // than the one that just reached an accept state. + SkipAlt = cfg.GetAlt() + } + } + } + } +} + +func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Printf("ACTION %v\n", lexerActionExecutor) + } + // seek to after last char in token + input.Seek(index) + l.Line = line + l.CharPositionInLine = charPos + if lexerActionExecutor != nil && l.recog != nil { + lexerActionExecutor.execute(l.recog, input, startIndex) + } +} + +func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { + if trans.Matches(t, 0, LexerMaxCharValue) { + return trans.getTarget() + } + + return nil +} + +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *ATNConfigSet { + configs := NewOrderedATNConfigSet() + for i := 0; i < len(p.GetTransitions()); i++ { + target := p.GetTransitions()[i].getTarget() + cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) + l.closure(input, cfg, configs, false, false, false) + } + + return configs +} + +// closure since the alternatives within any lexer decision are ordered by +// preference, this method stops pursuing the closure as soon as an accept +// state is reached. 
After the first accept state is reached by depth-first +// search from runtimeConfig, all other (potentially reachable) states for +// this rule would have a lower priority. +// +// The func returns true if an accept state is reached. +func (l *LexerATNSimulator) closure(input CharStream, config *ATNConfig, configs *ATNConfigSet, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") + } + + _, ok := config.state.(*RuleStopState) + if ok { + + if runtimeConfig.lexerATNSimulatorDebug { + if l.recog != nil { + fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) + } else { + fmt.Printf("closure at rule stop %s\n", config) + } + } + + if config.context == nil || config.context.hasEmptyPath() { + if config.context == nil || config.context.isEmpty() { + configs.Add(config, nil) + return true + } + + configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) + currentAltReachedAcceptState = true + } + if config.context != nil && !config.context.isEmpty() { + for i := 0; i < config.context.length(); i++ { + if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { + newContext := config.context.GetParent(i) // "pop" return state + returnState := l.atn.states[config.context.getReturnState(i)] + cfg := NewLexerATNConfig2(config, returnState, newContext) + currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + } + return currentAltReachedAcceptState + } + // optimization + if !config.state.GetEpsilonOnlyTransitions() { + if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { + configs.Add(config, nil) + } + } + for j := 0; j < len(config.state.GetTransitions()); j++ { + trans := config.state.GetTransitions()[j] + cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) + if cfg != nil { + currentAltReachedAcceptState = l.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + return currentAltReachedAcceptState +} + +// side-effect: can alter configs.hasSemanticContext +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *ATNConfig, trans Transition, + configs *ATNConfigSet, speculative, treatEOFAsEpsilon bool) *ATNConfig { + + var cfg *ATNConfig + + if trans.getSerializationType() == TransitionRULE { + + rt := trans.(*RuleTransition) + newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) + cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) + + } else if trans.getSerializationType() == TransitionPRECEDENCE { + panic("Precedence predicates are not supported in lexers.") + } else if trans.getSerializationType() == TransitionPREDICATE { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for l "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // l predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? 
| ID {p2}? + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since l is when we + // test them, we cannot cash the DFA state target of ID. + + pt := trans.(*PredicateTransition) + + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) + } + configs.hasSemanticContext = true + if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionACTION { + if config.context == nil || config.context.hasEmptyPath() { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In this case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) + cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) + } else { + // ignore actions in referenced rules + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionEPSILON { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } else if trans.getSerializationType() == TransitionATOM || + trans.getSerializationType() == TransitionRANGE || + trans.getSerializationType() == TransitionSET { + if treatEOFAsEpsilon { + if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } + } + return cfg +} + +// evaluatePredicate eEvaluates a predicate specified in the lexer. +// +// If speculative is true, this method was called before +// [consume] for the Matched character. This method should call +// [consume] before evaluating the predicate to ensure position +// sensitive values, including [GetText], [GetLine], +// and [GetColumn], properly reflect the current +// lexer state. This method should restore input and the simulator +// to the original state before returning, i.e. undo the actions made by the +// call to [Consume]. +// +// The func returns true if the specified predicate evaluates to true. 
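+//
+// A predicate of this kind originates from a {...}? guard in a lexer rule,
+// e.g. (an illustrative grammar fragment; isKeyword is a hypothetical helper):
+//
+//	KEYWORD : [a-z]+ { isKeyword() }? ;  // the predicate is evaluated via Sempred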
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { + // assume true if no recognizer was provided + if l.recog == nil { + return true + } + if !speculative { + return l.recog.Sempred(nil, ruleIndex, predIndex) + } + savedcolumn := l.CharPositionInLine + savedLine := l.Line + index := input.Index() + marker := input.Mark() + + defer func() { + l.CharPositionInLine = savedcolumn + l.Line = savedLine + input.Seek(index) + input.Release(marker) + }() + + l.Consume(input) + return l.recog.Sempred(nil, ruleIndex, predIndex) +} + +func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { + settings.index = input.Index() + settings.line = l.Line + settings.column = l.CharPositionInLine + settings.dfaState = dfaState +} + +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs *ATNConfigSet) *DFAState { + if to == nil && cfgs != nil { + // leading to l call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes l edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to reSynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. + // + suppressEdge := cfgs.hasSemanticContext + cfgs.hasSemanticContext = false + to = l.addDFAState(cfgs, true) + + if suppressEdge { + return to + } + } + // add the edge + if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { + // Only track edges within the DFA bounds + return to + } + if runtimeConfig.lexerATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) + } + l.atn.edgeMu.Lock() + defer l.atn.edgeMu.Unlock() + if from.getEdges() == nil { + // make room for tokens 1..n and -1 masquerading as index 0 + from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1)) + } + from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect + + return to +} + +// Add a NewDFA state if there isn't one with l set of +// configurations already. This method also detects the first +// configuration containing an ATN rule stop state. Later, when +// traversing the DFA, we will know which rule to accept. +func (l *LexerATNSimulator) addDFAState(configs *ATNConfigSet, suppressEdge bool) *DFAState { + + proposed := NewDFAState(-1, configs) + var firstConfigWithRuleStopState *ATNConfig + + for _, cfg := range configs.configs { + _, ok := cfg.GetState().(*RuleStopState) + + if ok { + firstConfigWithRuleStopState = cfg + break + } + } + if firstConfigWithRuleStopState != nil { + proposed.isAcceptState = true + proposed.lexerActionExecutor = firstConfigWithRuleStopState.lexerActionExecutor + proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) + } + dfa := l.decisionToDFA[l.mode] + + l.atn.stateMu.Lock() + defer l.atn.stateMu.Unlock() + existing, present := dfa.Get(proposed) + if present { + + // This state was already present, so just return it. 
+ // + proposed = existing + } else { + + // We need to add the new state + // + proposed.stateNumber = dfa.Len() + configs.readOnly = true + configs.configLookup = nil // Not needed now + proposed.configs = configs + dfa.Put(proposed) + } + if !suppressEdge { + dfa.setS0(proposed) + } + return proposed +} + +func (l *LexerATNSimulator) getDFA(mode int) *DFA { + return l.decisionToDFA[mode] +} + +// GetText returns the text [Match]ed so far for the current token. +func (l *LexerATNSimulator) GetText(input CharStream) string { + // index is first lookahead char, don't include. + return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) +} + +func (l *LexerATNSimulator) Consume(input CharStream) { + curChar := input.LA(1) + if curChar == int('\n') { + l.Line++ + l.CharPositionInLine = 0 + } else { + l.CharPositionInLine++ + } + input.Consume() +} + +func (l *LexerATNSimulator) GetCharPositionInLine() int { + return l.CharPositionInLine +} + +func (l *LexerATNSimulator) GetLine() int { + return l.Line +} + +func (l *LexerATNSimulator) GetTokenName(tt int) string { + if tt == -1 { + return "EOF" + } + + var sb strings.Builder + sb.Grow(6) + sb.WriteByte('\'') + sb.WriteRune(rune(tt)) + sb.WriteByte('\'') + + return sb.String() +} + +func resetSimState(sim *SimState) { + sim.index = -1 + sim.line = 0 + sim.column = -1 + sim.dfaState = nil +} + +type SimState struct { + index int + line int + column int + dfaState *DFAState +} + +func NewSimState() *SimState { + s := new(SimState) + resetSimState(s) + return s +} + +func (s *SimState) reset() { + resetSimState(s) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go new file mode 100644 index 00000000..dfdff000 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/ll1_analyzer.go @@ -0,0 +1,219 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +const ( + // LL1AnalyzerHitPred is a special value added to the lookahead sets to indicate that we hit + // a predicate during analysis if + // + // seeThruPreds==false + LL1AnalyzerHitPred = TokenInvalidType +) + +// * +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. +func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + + look[alt] = NewIntervalSet() + // TODO: This is one of the reasons that ATNConfigs are allocated and freed all the time - fix this tomorrow jim! 
+ lookBusy := NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.getDecisionLookahead for lookBusy") + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), false, false) + + // Wipe out lookahead for la alternative if we found nothing, + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +// Look computes the set of tokens that can follow s in the [ATN] in the +// specified ctx. +// +// If ctx is nil and the end of the rule containing +// s is reached, [EPSILON] is added to the result set. +// +// If ctx is not nil and the end of the outermost rule is +// reached, [EOF] is added to the result set. +// +// Parameter s the ATN state, and stopState is the ATN state to stop at. This can be a +// [BlockEndState] to detect epsilon paths through a closure. +// +// Parameter ctx is the complete parser context, or nil if the context +// should be ignored +// +// The func returns the set of tokens that can follow s in the [ATN] in the +// specified ctx. +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + var lookContext *PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, "LL1Analyzer.Look for la.look1()"), + NewBitSet(), true, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//
If {@code ctx} is {@code nil} and {@code stopState} or the end of the +// rule containing {@code s} is reached, {@link Token//EPSILON} is added to +// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is +// {@code true} and {@code stopState} or the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.
+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. + +func (la *LL1Analyzer) look2(_, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewATNConfig6(s, 0, ctx) + + if lookBusy.Contains(c) { + return + } + + _, present := lookBusy.Put(c) + if present { + return + + } + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx.pcType != PredictionContextEmpty { + removed := calledRuleStack.contains(s.GetRuleIndex()) + defer func() { + if removed { + calledRuleStack.add(s.GetRuleIndex()) + } + }() + calledRuleStack.remove(s.GetRuleIndex()) + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := 
t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) + } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx *PredictionContext, look *IntervalSet, lookBusy *JStore[*ATNConfig, Comparator[*ATNConfig]], + calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex.go new file mode 100644 index 00000000..2b0cda47 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/mutex.go @@ -0,0 +1,41 @@ +//go:build !antlr.nomutex +// +build !antlr.nomutex + +package antlr + +import "sync" + +// Mutex is a simple mutex implementation which just delegates to sync.Mutex, it +// is used to provide a mutex implementation for the antlr package, which users +// can turn off with the build tag -tags antlr.nomutex +type Mutex struct { + mu sync.Mutex +} + +func (m *Mutex) Lock() { + m.mu.Lock() +} + +func (m *Mutex) Unlock() { + m.mu.Unlock() +} + +type RWMutex struct { + mu sync.RWMutex +} + +func (m *RWMutex) Lock() { + m.mu.Lock() +} + +func (m *RWMutex) Unlock() { + m.mu.Unlock() +} + +func (m *RWMutex) RLock() { + m.mu.RLock() +} + +func (m *RWMutex) RUnlock() { + m.mu.RUnlock() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go new file mode 100644 index 00000000..35ce4353 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/mutex_nomutex.go @@ -0,0 +1,32 @@ +//go:build antlr.nomutex +// +build antlr.nomutex + +package antlr + +type Mutex struct{} + +func (m *Mutex) Lock() { + // No-op +} + +func (m *Mutex) Unlock() { + // No-op +} + +type RWMutex struct{} + +func (m *RWMutex) Lock() { + // No-op +} + +func (m *RWMutex) Unlock() { + // No-op +} + +func (m *RWMutex) RLock() { + // No-op +} + +func (m *RWMutex) RUnlock() { + // No-op +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go new file mode 100644 index 00000000..923c7b52 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/nostatistics.go @@ -0,0 +1,47 @@ +//go:build !antlr.stats + +package antlr + +// This file is compiled when the build configuration antlr.stats is not enabled. +// which then allows the compiler to optimize out all the code that is not used. +const collectStats = false + +// goRunStats is a dummy struct used when build configuration antlr.stats is not enabled. 
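Both the mutex shims above and the no-op statistics collector that follows rely on the same Go build-tag trick: two files declare the same identifiers under mutually exclusive tags, so calling code locks (or records stats) unconditionally and the build decides whether those calls do anything. A minimal sketch of consuming the shim, assuming only what the two mutex files above export; the cache type here is illustrative, not part of the runtime:

    package main

    import (
        "fmt"

        "github.com/antlr4-go/antlr/v4"
    )

    // cache locks through antlr.RWMutex; built normally this is a real
    // sync.RWMutex, built with `go build -tags antlr.nomutex` it compiles
    // to the no-op variant and the locking disappears.
    type cache struct {
        mu    antlr.RWMutex
        items map[string]int
    }

    func (c *cache) get(key string) (int, bool) {
        c.mu.RLock()
        defer c.mu.RUnlock()
        v, ok := c.items[key]
        return v, ok
    }

    func main() {
        c := &cache{items: map[string]int{"a": 1}}
        v, _ := c.get("a")
        fmt.Println(v) // 1, with or without the tag
    }

The same selection happens for statistics: with -tags antlr.stats a real collector is compiled, while without it the stub below lets the compiler optimize the bookkeeping away.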
+type goRunStats struct { +} + +var Statistics = &goRunStats{} + +func (s *goRunStats) AddJStatRec(_ *JStatRec) { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) CollectionAnomalies() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Reset() { + // Do nothing - compiler will optimize this out (hopefully) +} + +func (s *goRunStats) Report(dir string, prefix string) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func (s *goRunStats) Analyze() { + // Do nothing - compiler will optimize this out (hopefully) +} + +type statsOption func(*goRunStats) error + +func (s *goRunStats) Configure(options ...statsOption) error { + // Do nothing - compiler will optimize this out (hopefully) + return nil +} + +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + return nil + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser.go b/vendor/github.com/antlr4-go/antlr/v4/parser.go new file mode 100644 index 00000000..fb57ac15 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/parser.go @@ -0,0 +1,700 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// NewBaseParser contains all the parsing support code to embed in parsers. Essentially most of it is error +// recovery stuff. +// +//goland:noinspection GoUnusedExportedFunction +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + + // The error handling strategy for the parser. The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + + // The ParserRuleContext object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + + // Specifies whether the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + + // When setTrace(true) is called, a reference to the + // TraceListener is stored here, so it can be easily removed in a + // later call to setTrace(false). The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. 
+ p.tracer = nil + + // The list of ParseTreeListener listeners registered to receive + // events during the parse. + p.parseListeners = nil + + // The number of syntax errors Reported during parsing. p.value is + // incremented each time NotifyErrorListeners is called. + p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// This field maps from the serialized ATN string to the deserialized [ATN] with +// bypass alternatives. +// +// [ATNDeserializationOptions.isGenerateRuleBypassTransitions] +// +//goland:noinspection GoUnusedGlobalVariable +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//
If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.HasError() { + return nil + } + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//
If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a new token during single token + // insertion if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// AddParseListener registers listener to receive events during the parsing process. +// +// To support output-preserving grammar transformations (including but not +// limited to left-recursion removal, automated left-factoring, and +// optimized code generation), calls to listener methods during the parse +// may differ substantially from calls made by +// [ParseTreeWalker.DEFAULT] used after the parse is complete. In +// particular, rule entry and exit events may occur in a different order +// during the parse than after the parser. In addition, calls to certain +// rule entry methods may be omitted. +// +// With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same. +// +// - Alterations to the grammar used to generate code may change the +// behavior of the listener calls. +// - Alterations to the command line options passed to ANTLR 4 when +// generating the parser may change the behavior of the listener calls. +// - Changing the version of the ANTLR Tool used to generate the parser +// may change the behavior of the listener calls. +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// RemoveParseListener removes listener from the list of parse listeners. +// +// If listener is nil or has not been added as a parse +// listener, this func does nothing. +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// TriggerEnterRuleEvent notifies all parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// TriggerExitRuleEvent notifies any parse listeners of an exit rule event. 
+func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// setTokenFactory is used to tell our token source and error strategy about a new way to create tokens. +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// GetATNWithBypassAlts - the ATN with bypass alternatives is expensive to create, so we create it +// lazily. +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO - Implement this? + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+// 
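The tree-pattern sample above is carried over from the Java runtime documentation; in this Go port, compileParseTreePattern below simply panics as not implemented. The practical way to observe rule entry and exit is the parse-listener mechanism described at AddParseListener. A minimal sketch, assuming a generated parser; ruleLogger and the Prog start rule are illustrative names, not runtime API:

    // ruleLogger embeds the runtime's BaseParseTreeListener so that only
    // the events of interest need overriding.
    type ruleLogger struct {
        *antlr.BaseParseTreeListener
        depth int
    }

    func (r *ruleLogger) EnterEveryRule(ctx antlr.ParserRuleContext) { r.depth++ }
    func (r *ruleLogger) ExitEveryRule(ctx antlr.ParserRuleContext)  { r.depth-- }

    // usage, for some generated parser p:
    //   p.AddParseListener(&ruleLogger{BaseParseTreeListener: &antlr.BaseParseTreeListener{}})
    //   tree := p.Prog() // listener events fire during this call, not after it

Note the caveat in the AddParseListener documentation: during-parse events may differ in order and content from a post-parse walk with ParseTreeWalker.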
+ +//goland:noinspection GoUnusedParameter +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// SetTokenStream installs input as the token stream and resets the parser. +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// GetCurrentToken returns the current token at LT(1). +// +// [Match] needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. +func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.InErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, _ int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + localctx.SetAltNumber(altNum) + // if we have a new localctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx 
+} + +// Get the precedence level for the top-most precedence rule. +// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, _, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. + +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, _ int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + _, _ = p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(_ RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +//goland:noinspection GoUnusedParameter +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// IsExpectedToken checks whether symbol can follow the current state in the +// {ATN}. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. 
+// +// return getExpectedTokens().contains(symbol) +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// GetExpectedTokens and returns the set of input symbols which could follow the current parser +// state and context, as given by [GetState] and [GetContext], +// respectively. +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// GetRuleIndex get a rule's index (i.e., RULE_ruleName field) or -1 if not found. +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// GetRuleInvocationStack returns a list of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// GetDFAStrings returns a list of all DFA states used for debugging purposes +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// DumpDFA prints the whole of the DFA for debugging +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.Len() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// SetTrace installs a trace listener for the parse. +// +// During a parse it is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. This is for quick and dirty debugging. 
+func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go new file mode 100644 index 00000000..724fa17a --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/parser_atn_simulator.go @@ -0,0 +1,1666 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// ClosureBusy is a store of ATNConfigs and is a tiny abstraction layer over +// a standard JStore so that we can use Lazy instantiation of the JStore, mostly +// to avoid polluting the stats module with a ton of JStore instances with nothing in them. +type ClosureBusy struct { + bMap *JStore[*ATNConfig, Comparator[*ATNConfig]] + desc string +} + +// NewClosureBusy creates a new ClosureBusy instance used to avoid infinite recursion for right-recursive rules +func NewClosureBusy(desc string) *ClosureBusy { + return &ClosureBusy{ + desc: desc, + } +} + +func (c *ClosureBusy) Put(config *ATNConfig) (*ATNConfig, bool) { + if c.bMap == nil { + c.bMap = NewJStore[*ATNConfig, Comparator[*ATNConfig]](aConfEqInst, ClosureBusyCollection, c.desc) + } + return c.bMap.Put(config) +} + +type ParserATNSimulator struct { + BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *JPCMap + outerContext ParserRuleContext +} + +//goland:noinspection GoUnusedExportedFunction +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := &ParserATNSimulator{ + BaseATNSimulator: BaseATNSimulator{ + atn: atn, + sharedContextCache: sharedContextCache, + }, + } + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. [JPCMap] + // isn't Synchronized, but we're ok since two threads shouldn't reuse same + // parser/atn-simulator object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b) -> c. We can avoid + // the merge if we ever see a and b again. Note that (b,a) -> c should + // also be examined during cache lookup. 
+ // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) AdaptivePredict(parser *BaseParser, input TokenStream, decision int, outerContext ParserRuleContext) int { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("adaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // whack cache after each prediction + // Do not attempt to run a GC now that we're done with the cache as makes the + // GC overhead terrible for badly formed grammars and has little effect on well formed + // grammars. + // I have made some extra effort to try and reduce memory pressure by reusing allocations when + // possible. However, it can only have a limited effect. The real solution is to encourage grammar + // authors to think more carefully about their grammar and to use the new antlr.stats tag to inspect + // what is happening at runtime, along with using the error listener to report ambiguities. + + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + p.atn.stateMu.RLock() + if dfa.getPrecedenceDfa() { + p.atn.edgeMu.RLock() + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. + s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + p.atn.edgeMu.RUnlock() + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.getS0() + } + p.atn.stateMu.RUnlock() + + if s0 == nil { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, ParserRuleContextEmpty, fullCtx) + + p.atn.stateMu.Lock() + if dfa.getPrecedenceDfa() { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. 
+ // + dfa.s0.configs = s0Closure + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + p.atn.edgeMu.Lock() + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + p.atn.edgeMu.Unlock() + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setS0(s0) + } + p.atn.stateMu.Unlock() + } + + alt, re := p.execATN(dfa, s0, input, index, outerContext) + parser.SetError(re) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// execATN performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. +// +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// +// - If the set is empty, there is no viable alternative for current symbol +// - Does the state uniquely predict an alternative? +// - Does the state have a conflict that would prevent us from +// putting it on the work list? +// +// We also have some key operations to do: +// +// - Add an edge from previous DFA state to potentially NewDFA state, D, +// - Upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) +// - Collecting predicates and adding semantic context to DFA accept states +// - adding rule context to context-sensitive DFA accept states +// - Consuming an input symbol +// - Reporting a conflict +// - Reporting an ambiguity +// - Reporting a context sensitivity +// - Reporting insufficient predicates +// +// Cover these cases: +// +// - dead end +// - single alt +// - single alt + predicates +// - conflict +// - conflict + predicates +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + ", DFA state " + s0.String() + + ", LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. 
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + p.parser.SetError(e) + return ATNInvalidAltNumber, e + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.conflictingAlts + if D.predicates != nil { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL fail-over") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue(), nil + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt, re := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt, re + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction, nil + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, outerContext, true) + + switch alts.length() { + case 0: + return ATNInvalidAltNumber, p.noViableAlt(input, outerContext, D.configs, startIndex) + case 1: + return alts.minValue(), nil + default: + // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported. + p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue(), nil + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + if t+1 < 0 { + return nil + } + + p.atn.edgeMu.RLock() + defer p.atn.edgeMu.RUnlock() + edges := previousD.getEdges() + if edges == nil || t+1 >= len(edges) { + return nil + } + return previousD.getIthEdge(t + 1) +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. 
+// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create new target state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if runtimeConfig.parserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.uniqueAlt = predictedAlt + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.conflictingAlts = p.getConflictingAlts(reach) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.conflictingAlts.minValue()) + } + if D.isAcceptState && D.configs.hasSemanticContext { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. + nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 *ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) (int, RecognitionException) { + + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach *ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. 
Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt, nil + } + return alt, p.noViableAlt(input, outerContext, previous, startIndex) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.uniqueAlt = p.getUniqueAlt(reach) + // unique prediction? + if reach.uniqueAlt != ATNInvalidAltNumber { + predictedAlt = reach.uniqueAlt + break + } + if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. + if reach.uniqueAlt != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt, nil + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have non-conflicting alt subsets as in: + // + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + // + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. 
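The exact-ambiguity branch above only runs when the prediction mode is PredictionModeLLExactAmbigDetection; the default LL mode stops as soon as prediction resolves to one viable alternative. A sketch of opting in and surfacing the resulting reports, assuming a generated parser that embeds *antlr.BaseParser; the helper name is illustrative, not runtime API:

    // enableAmbiguityDiagnostics puts the parser into exact ambiguity
    // detection and routes ambiguity reports through a
    // DiagnosticErrorListener.
    func enableAmbiguityDiagnostics(p *antlr.BaseParser) {
        p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)
        p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) // true: report exact ambiguities only
    }

This trades speed for precision, which fits the comment in AdaptivePredict above about using the error listener to report ambiguities while developing a grammar rather than in production.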
+ + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach) + + return predictedAlt, nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeReachSet(closure *ATNConfigSet, t int, fullCtx bool) *ATNConfigSet { + if p.mergeCache == nil { + p.mergeCache = NewJPCMap(ReachSetCollection, "Merge cache for computeReachSet()") + } + intermediate := NewATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var skippedStopStates []*ATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.configs { + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + if _, ok := c.GetState().(*RuleStopState); ok { + if fullCtx || t == TokenEOF { + skippedStopStates = append(skippedStopStates, c) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for _, trans := range c.GetState().GetTransitions() { + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + + // Now figure out where the reach operation can take us... + var reach *ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. + // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if skippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. 
+ // + if reach == nil { + reach = NewATNConfigSet(fullCtx) + closureBusy := NewClosureBusy("ParserATNSimulator.computeReachSet() make a closureBusy") + treatEOFAsEpsilon := t == TokenEOF + amount := len(intermediate.configs) + for k := 0; k < amount; k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet this condition whether it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach.Equals(intermediate)) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(skippedStopStates); l++ { + reach.Add(skippedStopStates[l], p.mergeCache) + } + } + + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeReachSet " + closure.String() + " -> " + reach.String()) + } + + if len(reach.configs) == 0 { + return nil + } + + return reach +} + +// removeAllConfigsNotInRuleStopState returns a configuration set containing only the configurations from +// configs which are in a [RuleStopState]. If all +// configurations in configs are already in a rule stop state, this +// method simply returns configs. +// +// When lookToEndOfRule is true, this method uses +// [ATN].[NextTokens] for each configuration in configs which is +// not already in a rule stop state to see if a rule stop state is reachable +// from the configuration via epsilon-only transitions. +// +// When lookToEndOfRule is true, this method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// configs. 
+// +// The func returns configs if all configurations in configs are in a +// rule stop state, otherwise it returns a new configuration set containing only +// the configurations from configs which are in a rule stop state +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs *ATNConfigSet, lookToEndOfRule bool) *ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewATNConfigSet(configs.fullCtx) + for _, config := range configs.configs { + if _, ok := config.GetState().(*RuleStopState); ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) *ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewATNConfigSet(fullCtx) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("computeStartState from ATN state " + a.String() + + " initialContext=" + initialContext.String()) + } + + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewATNConfig6(target, i+1, initialContext) + closureBusy := NewClosureBusy("ParserATNSimulator.computeStartState() make a closureBusy") + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// applyPrecedenceFilter transforms the start state computed by +// [computeStartState] to the special start state used by a +// precedence [DFA] for a particular precedence value. The transformation +// process applies the following changes to the start state's configuration +// set. +// +// 1. Evaluate the precedence predicates for each configuration using +// [SemanticContext].evalPrecedence. +// 2. Remove all configurations which predict an alternative greater than +// 1, for which another configuration that predicts alternative 1 is in the +// same ATN state with the same prediction context. +// +// Transformation 2 is valid for the following reasons: +// +// - The closure block cannot contain any epsilon transitions which bypass +// the body of the closure, so all states reachable via alternative 1 are +// part of the precedence alternatives of the transformed left-recursive +// rule. +// - The "primary" portion of a left recursive rule cannot contain an +// epsilon transition, so the only way an alternative other than 1 can exist +// in a state that is also reachable via alternative 1 is by nesting calls +// to the left-recursive rule, with the outer calls not being at the +// preferred precedence level. +// +// The prediction context must be considered by this filter to address +// situations like the following: +// +// grammar TA +// prog: statement* EOF +// statement: letterA | statement letterA 'b' +// letterA: 'a' +// +// In the above grammar, the [ATN] state immediately before the token +// reference 'a' in letterA is reachable from the left edge +// of both the primary and closure blocks of the left-recursive rule +// statement. 
The prediction context associated with each of these +// configurations distinguishes between them, and prevents the alternative +// which stepped out to prog, and then back in to statement +// from being eliminated by the filter. +// +// The func returns the transformed configuration set representing the start state +// for a precedence [DFA] at a particular precedence level (determined by +// calling [Parser].getPrecedence). +func (p *ParserATNSimulator) applyPrecedenceFilter(configs *ATNConfigSet) *ATNConfigSet { + + statesFromAlt1 := make(map[int]*PredictionContext) + configSet := NewATNConfigSet(configs.fullCtx) + + for _, config := range configs.configs { + // handle alt 1 first + if config.GetAlt() != 1 { + continue + } + updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() + if updatedContext != config.GetSemanticContext() { + configSet.Add(NewATNConfig2(config, updatedContext), p.mergeCache) + } else { + configSet.Add(config, p.mergeCache) + } + } + for _, config := range configs.configs { + + if config.GetAlt() == 1 { + // already handled + continue + } + // In the future, p elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). + if !config.getPrecedenceFilterSuppressed() { + context := statesFromAlt1[config.GetState().GetStateNumber()] + if context != nil && context.Equals(config.GetContext()) { + // eliminated + continue + } + } + configSet.Add(config, p.mergeCache) + } + return configSet +} + +func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { + if trans.Matches(ttype, 0, p.atn.maxTokenType) { + return trans.getTarget() + } + + return nil +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs *ATNConfigSet, nalts int) []SemanticContext { + + altToPred := make([]SemanticContext, nalts+1) + for _, c := range configs.configs { + if ambigAlts.contains(c.GetAlt()) { + altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) + } + } + nPredAlts := 0 + for i := 1; i <= nalts; i++ { + pred := altToPred[i] + if pred == nil { + altToPred[i] = SemanticContextNone + } else if pred != SemanticContextNone { + nPredAlts++ + } + } + // unambiguous alts are nil in altToPred + if nPredAlts == 0 { + altToPred = nil + } + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) + } + return altToPred +} + +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { + pairs := make([]*PredPrediction, 0) + containsPredicate := false + for i := 1; i < len(altToPred); i++ { + pred := altToPred[i] + // un-predicated is indicated by SemanticContextNONE + if ambigAlts != nil && ambigAlts.contains(i) { + pairs = append(pairs, NewPredPrediction(pred, i)) + } + if pred != SemanticContextNone { + containsPredicate = true + } + } + if !containsPredicate { + return nil + } + return pairs +} + +// getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule is used to improve the localization of error messages by +// choosing an alternative rather than panic a NoViableAltException in particular prediction scenarios where the +// Error state was reached during [ATN] 
simulation. +// +// The default implementation of this method uses the following +// algorithm to identify an [ATN] configuration which successfully parsed the +// decision entry rule. Choosing such an alternative ensures that the +// [ParserRuleContext] returned by the calling rule will be complete +// and valid, and the syntax error will be Reported later at a more +// localized location. +// +// - If a syntactically valid path or paths reach the end of the decision rule, and +// they are semantically valid if predicated, return the min associated alt. +// - Else, if a semantically invalid but syntactically valid path exist +// or paths exist, return the minimum associated alt. +// - Otherwise, return [ATNInvalidAltNumber]. +// +// In some scenarios, the algorithm described above could predict an +// alternative which will result in a [FailedPredicateException] in +// the parser. Specifically, this could occur if the only configuration +// capable of successfully parsing to the end of the decision rule is +// blocked by a semantic predicate. By choosing this alternative within +// [AdaptivePredict] instead of panic a [NoViableAltException], the resulting +// [FailedPredicateException] in the parser will identify the specific +// predicate which is preventing the parser from successfully parsing the +// decision rule, which helps developers identify and correct logic errors +// in semantic predicates. +// +// pass in the configs holding ATN configurations which were valid immediately before +// the ERROR state was reached, outerContext as the initial parser context from the paper +// or the parser stack at the instant before prediction commences. +// +// The func returns the value to return from [AdaptivePredict], or +// [ATNInvalidAltNumber] if a suitable alternative was not +// identified and [AdaptivePredict] should report an error instead. +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs *ATNConfigSet, outerContext ParserRuleContext) int { + cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) + semValidConfigs := cfgs[0] + semInvalidConfigs := cfgs[1] + alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.configs) > 0 { + alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNInvalidAltNumber { // syntactically viable path exists + return alt + } + } + return ATNInvalidAltNumber +} + +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs *ATNConfigSet) int { + alts := NewIntervalSet() + + for _, c := range configs.configs { + _, ok := c.GetState().(*RuleStopState) + + if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { + alts.addOne(c.GetAlt()) + } + } + if alts.length() == 0 { + return ATNInvalidAltNumber + } + + return alts.first() +} + +// Walk the list of configurations and split them according to +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. +// +// Create a NewSet so as not to alter the incoming parameter. +// +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. 
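The partition rule described just above (configurations with failing predicates split out; configurations without a predicate assumed to succeed) can be stated on its own. A standalone sketch of the same rule, with every name illustrative rather than runtime API:

    // partitionByPredicate splits items the way splitAccordingToSemanticValidity
    // does: an item lands in failed only if it carries a predicate and that
    // predicate evaluates to false; items without a predicate are assumed true.
    func partitionByPredicate[T any](items []T, pred func(T) (hasPred, result bool)) (succeeded, failed []T) {
        for _, it := range items {
            if has, ok := pred(it); has && !ok {
                failed = append(failed, it)
                continue
            }
            succeeded = append(succeeded, it)
        }
        return succeeded, failed
    }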
+ +type ATNConfigSetPair struct { + item0, item1 *ATNConfigSet +} + +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs *ATNConfigSet, outerContext ParserRuleContext) []*ATNConfigSet { + succeeded := NewATNConfigSet(configs.fullCtx) + failed := NewATNConfigSet(configs.fullCtx) + + for _, c := range configs.configs { + if c.GetSemanticContext() != SemanticContextNone { + predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) + if predicateEvaluationResult { + succeeded.Add(c, nil) + } else { + failed.Add(c, nil) + } + } else { + succeeded.Add(c, nil) + } + } + return []*ATNConfigSet{succeeded, failed} +} + +// evalSemanticContext looks through a list of predicate/alt pairs, returning alts for the +// pairs that win. A [SemanticContextNone] predicate indicates an alt containing an +// un-predicated runtimeConfig which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + var stack []*ATNConfig + visited := make(map[*ATNConfig]bool) + + stack = append(stack, config) + + for len(stack) > 0 { + currConfig := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + if _, ok := visited[currConfig]; ok { + continue + } + visited[currConfig] = true + + if _, ok := currConfig.GetState().(*RuleStopState); ok { + // We hit rule end. 
If we have context info, use it + // run thru all possible stack tops in ctx + if !currConfig.GetContext().isEmpty() { + for i := 0; i < currConfig.GetContext().length(); i++ { + if currConfig.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(currConfig, currConfig.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[currConfig.GetContext().getReturnState(i)] + newContext := currConfig.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, currConfig.GetAlt(), newContext, currConfig.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. + c.SetReachesIntoOuterContext(currConfig.GetReachesIntoOuterContext()) + + stack = append(stack, c) + } + continue + } else if fullCtx { + // reached end of start rule + configs.Add(currConfig, p.mergeCache) + continue + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(currConfig.GetState().GetRuleIndex())) + } + } + } + + p.closureWork(currConfig, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureCheckingStopStateRecursive(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("closure(" + config.String() + ")") + } + + if _, ok := config.GetState().(*RuleStopState); ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + nb := NewATNConfig1(config, config.GetState(), BasePredictionContextEMPTY) + configs.Add(nb, p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges +// +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) closureWork(config *ATNConfig, configs *ATNConfigSet, closureBusy *ClosureBusy, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. + } + for i := 0; i < len(state.GetTransitions()); i++ { + if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) { + continue + } + + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if c != nil { + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if this is > 0. + + if p.dfa != nil && p.dfa.getPrecedenceDfa() { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for right-recursive rules + continue + } + + configs.dipsIntoOuterContext = true // TODO: can remove? only care when we add to set per middle of this method + newDepth-- + if runtimeConfig.parserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else { + + if !t.getIsEpsilon() { + _, present := closureBusy.Put(c) + if present { + // avoid infinite recursion for EOF* and EOF+ + continue + } + } + if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +//goland:noinspection GoBoolExpressions +func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config *ATNConfig) bool { + if !runtimeConfig.lRLoopEntryBranchOpt { + return false + } + + _p := config.GetState() + + // First check to see if we are in StarLoopEntryState generated during + // left-recursion elimination. For efficiency, also check if + // the context has an empty stack case. 
If so, it would mean + // global FOLLOW so we can't perform optimization + if _p.GetStateType() != ATNStateStarLoopEntry { + return false + } + startLoop, ok := _p.(*StarLoopEntryState) + if !ok { + return false + } + if !startLoop.precedenceRuleDecision || + config.GetContext().isEmpty() || + config.GetContext().hasEmptyPath() { + return false + } + + // Require all return states to return back to the same rule + // that p is in. + numCtxs := config.GetContext().length() + for i := 0; i < numCtxs; i++ { + returnState := p.atn.states[config.GetContext().getReturnState(i)] + if returnState.GetRuleIndex() != _p.GetRuleIndex() { + return false + } + } + x := _p.GetTransitions()[0].getTarget() + decisionStartState := x.(BlockStartState) + blockEndStateNum := decisionStartState.getEndState().stateNumber + blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState) + + // Verify that the top of each stack context leads to loop entry/exit + // state through epsilon edges and w/o leaving rule. + + for i := 0; i < numCtxs; i++ { // for each stack context + returnStateNumber := config.GetContext().getReturnState(i) + returnState := p.atn.states[returnStateNumber] + + // all states must have single outgoing epsilon edge + if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() { + return false + } + + // Look for prefix op case like 'not expr', (' type ')' expr + returnStateTarget := returnState.GetTransitions()[0].getTarget() + if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p { + continue + } + + // Look for 'expr op expr' or case where expr's return state is block end + // of (...)* internal block; the block end points to loop back + // which points to p but we don't need to check that + if returnState == blockEndState { + continue + } + + // Look for ternary expr ? expr : expr. 
The return state points at block end,
+		// which points at loop entry state
+		if returnStateTarget == blockEndState {
+			continue
+		}
+
+		// Look for complex prefix 'between expr and expr' case where 2nd expr's
+		// return state points at block end state of (...)* internal block
+		if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+			len(returnStateTarget.GetTransitions()) == 1 &&
+			returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+			returnStateTarget.GetTransitions()[0].getTarget() == _p {
+			continue
+		}
+
+		// anything else ain't conforming
+		return false
+	}
+
+	return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+	if p.parser != nil && index >= 0 {
+		return p.parser.GetRuleNames()[index]
+	}
+	var sb strings.Builder
+	sb.Grow(32)
+
+	sb.WriteString("<rule ")
+	sb.WriteString(strconv.FormatInt(int64(index), 10))
+	sb.WriteByte('>')
+	return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config *ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) *ATNConfig {
+
+	switch t.getSerializationType() {
+	case TransitionRULE:
+		return p.ruleTransition(config, t.(*RuleTransition))
+	case TransitionPRECEDENCE:
+		return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+	case TransitionPREDICATE:
+		return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+	case TransitionACTION:
+		return p.actionTransition(config, t.(*ActionTransition))
+	case TransitionEPSILON:
+		return NewATNConfig4(config, t.getTarget())
+	case TransitionATOM, TransitionRANGE, TransitionSET:
+		// EOF transitions act like epsilon transitions after the first EOF
+		// transition is traversed
+		if treatEOFAsEpsilon {
+			if t.Matches(TokenEOF, 0, 1) {
+				return NewATNConfig4(config, t.getTarget())
+			}
+		}
+		return nil
+	default:
+		return nil
+	}
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) actionTransition(config *ATNConfig, t *ActionTransition) *ATNConfig {
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+	}
+	return NewATNConfig4(config, t.getTarget())
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) precedenceTransition(config *ATNConfig,
+	pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+			strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+		if p.parser != nil {
+			fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+		}
+	}
+	var c *ATNConfig
+	if collectPredicates && inContext {
+		if fullCtx {
+			// In full context mode, we can evaluate predicates on-the-fly
+			// during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+			// later during conflict resolution.
+			currentPosition := p.input.Index()
+			p.input.Seek(p.startIndex)
+			predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+			p.input.Seek(currentPosition)
+			if predSucceeds {
+				c = NewATNConfig4(config, pt.getTarget()) // no pred context
+			}
+		} else {
+			newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+			c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+		}
+	} else {
+		c = NewATNConfig4(config, pt.getTarget())
+	}
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("config from pred transition=" + c.String())
+	}
+	return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) predTransition(config *ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *ATNConfig {
+
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+			":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+		if p.parser != nil {
+			fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+		}
+	}
+	var c *ATNConfig
+	if collectPredicates && (!pt.isCtxDependent || inContext) {
+		if fullCtx {
+			// In full context mode, we can evaluate predicates on-the-fly
+			// during closure, which dramatically reduces the size of
+			// the config sets. It also obviates the need to test predicates
+			// later during conflict resolution.
+			currentPosition := p.input.Index()
+			p.input.Seek(p.startIndex)
+			predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+			p.input.Seek(currentPosition)
+			if predSucceeds {
+				c = NewATNConfig4(config, pt.getTarget()) // no pred context
+			}
+		} else {
+			newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+			c = NewATNConfig3(config, pt.getTarget(), newSemCtx)
+		}
+	} else {
+		c = NewATNConfig4(config, pt.getTarget())
+	}
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("config from pred transition=" + c.String())
+	}
+	return c
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ruleTransition(config *ATNConfig, t *RuleTransition) *ATNConfig {
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+	}
+	returnState := t.followState
+	newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+	return NewATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs *ATNConfigSet) *BitSet {
+	altsets := PredictionModegetConflictingAltSubsets(configs)
+	return PredictionModeGetAlts(altsets)
+}
+
+// getConflictingAltsOrUniqueAlt handles a problem Sam pointed out with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+//	s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ;, it has a [DFA]
+// state that looks like:
+//
+//	[12|1|[], 6|2|[], 12|2|[]].
+//
+// Naturally
+//
+//	12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via
+//
+//	[6|2|[]].
+//
+// The key is that we have a single state that has configs only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd non-conflicting configuration for a different alternative:
+//
+//	[1|1|[], 1|2|[], 8|3|[]].
+//
+// This can come about from grammar:
+//
+//	a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, so we do not
+// stop working on this state.
+//
+// In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs *ATNConfigSet) *BitSet {
+	var conflictingAlts *BitSet
+	if configs.uniqueAlt != ATNInvalidAltNumber {
+		conflictingAlts = NewBitSet()
+		conflictingAlts.add(configs.uniqueAlt)
+	} else {
+		conflictingAlts = configs.conflictingAlts
+	}
+	return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+	if t == TokenEOF {
+		return "EOF"
+	}
+
+	if p.parser != nil && p.parser.GetLiteralNames() != nil && t < len(p.parser.GetLiteralNames()) {
+		return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+	}
+
+	if p.parser != nil && p.parser.GetSymbolicNames() != nil && t < len(p.parser.GetSymbolicNames()) {
+		return p.parser.GetSymbolicNames()[t] + "<" + strconv.Itoa(t) + ">"
+	}
+
+	return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+	return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in [AdaptivePredict] around [execATN], but I cut
+// it out for clarity now that alg. works well. We can leave this
+// "dead" code for a bit.
+func (p *ParserATNSimulator) dumpDeadEndConfigs(_ *NoViableAltException) {
+
+	panic("Not implemented")
+
+	// fmt.Println("dead end configs: ")
+	// var decs = nvae.deadEndConfigs
+	//
+	// for i:=0; i<len(decs); i++ {
+	//	c := decs[i]
+	//	var trans = "no edges"
+	//	if (len(c.state.GetTransitions())>0) {
+	//		var t = c.state.GetTransitions()[0]
+	//		if t2, ok := t.(*AtomTransition); ok {
+	//			trans = "Atom "+ p.GetTokenName(t2.label)
+	//		} else if t3, ok := t.(SetTransition); ok {
+	//			_, ok := t.(*NotSetTransition)
+	//
+	//			var s string
+	//			if (ok){
+	//				s = "~"
+	//			}
+	//
+	//			trans = s + "Set " + t3.set
+	//		}
+	//	}
+	//	fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+	// }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs *ATNConfigSet, startIndex int) *NoViableAltException {
+	return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs *ATNConfigSet) int {
+	alt := ATNInvalidAltNumber
+	for _, c := range configs.configs {
+		if alt == ATNInvalidAltNumber {
+			alt = c.GetAlt() // found first alt
+		} else if c.GetAlt() != alt {
+			return ATNInvalidAltNumber
+		}
+	}
+	return alt
+}
+
+// Add an edge to the DFA, if possible. This method calls
+// {@link //addDFAState} to ensure the {@code to} state is present in the
+// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+//
+// @param dfa The DFA
+// @param from The source state for the edge
+// @param t The input symbol
+// @param to The target state for the edge
+//
+// @return If {@code to} is {@code nil}, this method returns {@code nil};
+// otherwise it returns the result of calling {@link //addDFAState}
+// on {@code to}
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+	if runtimeConfig.parserATNSimulatorDebug {
+		fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+	}
+	if to == nil {
+		return nil
+	}
+	p.atn.stateMu.Lock()
+	to = p.addDFAState(dfa, to) // used existing if possible not incoming
+	p.atn.stateMu.Unlock()
+	if from == nil || t < -1 || t > p.atn.maxTokenType {
+		return to
+	}
+	p.atn.edgeMu.Lock()
+	if from.getEdges() == nil {
+		from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+	}
+	from.setIthEdge(t+1, to) // connect
+	p.atn.edgeMu.Unlock()
+
+	if runtimeConfig.parserATNSimulatorDebug {
+		var names []string
+		if p.parser != nil {
+			names = p.parser.GetLiteralNames()
+		}
+
+		fmt.Println("DFA=\n" + dfa.String(names, nil))
+	}
+	return to
+}
+
+// addDFAState adds state D to the [DFA] if it is not already present, and returns
+// the actual instance stored in the [DFA]. If a state equivalent to D
+// is already in the [DFA], the existing state is returned. Otherwise, this
+// method returns D after adding it to the [DFA].
+//
+// If D is [ATNSimulatorError], this method returns [ATNSimulatorError] and
+// does not change the DFA.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+	if d == ATNSimulatorError {
+		return d
+	}
+
+	existing, present := dfa.Get(d)
+	if present {
+		if runtimeConfig.parserATNSimulatorTraceATNSim {
+			fmt.Print("addDFAState " + d.String() + " exists")
+		}
+		return existing
+	}
+
+	// The state will be added if not already there or we will be given back the existing state struct
+	// if it is present.
+	//
+	d.stateNumber = dfa.Len()
+	if !d.configs.readOnly {
+		d.configs.OptimizeConfigs(&p.BaseATNSimulator)
+		d.configs.readOnly = true
+		d.configs.configLookup = nil
+	}
+	dfa.Put(d)
+
+	if runtimeConfig.parserATNSimulatorTraceATNSim {
+		fmt.Println("addDFAState new " + d.String())
+	}
+
+	return d
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs *ATNConfigSet, startIndex, stopIndex int) {
+	if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+	}
+}
+
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs *ATNConfigSet, startIndex, stopIndex int) {
+	if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+	}
+}
+
+// ReportAmbiguity reports an ambiguity in the parse, which shows that the parser will explore a different route.
+//
+// In context-sensitive parsing we know it's an ambiguity, not a conflict or error, but we can report it to the developer
+// so that they can see that this is happening and can take action if they want to.
+//
+//goland:noinspection GoBoolExpressions
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, _ *DFAState, startIndex, stopIndex int,
+	exact bool, ambigAlts *BitSet, configs *ATNConfigSet) {
+	if runtimeConfig.parserATNSimulatorDebug || runtimeConfig.parserATNSimulatorRetryDebug {
+		interval := NewInterval(startIndex, stopIndex+1)
+		fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+			", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+	}
+	if p.parser != nil {
+		p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+	}
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
new file mode 100644
index 00000000..c249bc13
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/parser_rule_context.go
@@ -0,0 +1,421 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"reflect"
+	"strconv"
+)
+
+type ParserRuleContext interface {
+	RuleContext
+
+	SetException(RecognitionException)
+
+	AddTokenNode(token Token) *TerminalNodeImpl
+	AddErrorNode(badToken Token) *ErrorNodeImpl
+
+	EnterRule(listener ParseTreeListener)
+	ExitRule(listener ParseTreeListener)
+
+	SetStart(Token)
+	GetStart() Token
+
+	SetStop(Token)
+	GetStop() Token
+
+	AddChild(child RuleContext) RuleContext
+	RemoveLastChild()
+}
+
+type BaseParserRuleContext struct {
+	parentCtx     RuleContext
+	invokingState int
+	RuleIndex     int
+
+	start, stop Token
+	exception   RecognitionException
+	children    []Tree
+}
+
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+	prc := new(BaseParserRuleContext)
+	InitBaseParserRuleContext(prc, parent, invokingStateNumber)
+	return prc
+}
+
+func InitBaseParserRuleContext(prc *BaseParserRuleContext, parent ParserRuleContext, invokingStateNumber int) {
+	// What context invoked this rule?
+	prc.parentCtx = parent
+
+	// What state invoked the rule associated with this context?
+	// The "return address" is the followState of invokingState.
+	// If parent is nil, this should be -1.
+	if parent == nil {
+		prc.invokingState = -1
+	} else {
+		prc.invokingState = invokingStateNumber
+	}
+
+	prc.RuleIndex = -1
+	// If we are debugging or building a parse tree for a Visitor,
+	// we need to track all of the tokens and rule invocations associated
+	// with this rule's context. This is empty for parsing without tree
+	// construction because we don't need to track the details about
+	// how we parse this rule.
+	prc.children = nil
+	prc.start = nil
+	prc.stop = nil
+	// The exception that forced this rule to return. If the rule successfully
+	// completed, this is {@code nil}.
+	prc.exception = nil
+}
+
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+	prc.exception = e
+}
+
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+	return prc.children
+}
+
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+	// from RuleContext
+	prc.parentCtx = ctx.parentCtx
+	prc.invokingState = ctx.invokingState
+	prc.children = nil
+	prc.start = ctx.start
+	prc.stop = ctx.stop
+}
+
+func (prc *BaseParserRuleContext) GetText() string {
+	if prc.GetChildCount() == 0 {
+		return ""
+	}
+
+	var s string
+	for _, child := range prc.children {
+		s += child.(ParseTree).GetText()
+	}
+
+	return s
+}
+
+// EnterRule is called when any rule is entered.
+func (prc *BaseParserRuleContext) EnterRule(_ ParseTreeListener) {
+}
+
+// ExitRule is called when any rule is exited.
+func (prc *BaseParserRuleContext) ExitRule(_ ParseTreeListener) {
+}
+
+// Does not set the parent link; the other add methods do that.
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+	if prc.children == nil {
+		prc.children = make([]Tree, 0)
+	}
+	if child == nil {
+		panic("Child may not be null")
+	}
+	prc.children = append(prc.children, child)
+	return child
+}
+
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+	if prc.children == nil {
+		prc.children = make([]Tree, 0)
+	}
+	if child == nil {
+		panic("Child may not be null")
+	}
+	prc.children = append(prc.children, child)
+	return child
+}
+
+// RemoveLastChild is used by [EnterOuterAlt] to toss out a [RuleContext] previously added as
+// we entered a rule. If we have a label, we will need to remove
+// the generic ruleContext object.
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+	if prc.children != nil && len(prc.children) > 0 {
+		prc.children = prc.children[0 : len(prc.children)-1]
+	}
+}
+
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+	node := NewTerminalNodeImpl(token)
+	prc.addTerminalNodeChild(node)
+	node.parentCtx = prc
+	return node
+
+}
+
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+	node := NewErrorNodeImpl(badToken)
+	prc.addTerminalNodeChild(node)
+	node.parentCtx = prc
+	return node
+}
+
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+	if prc.children != nil && len(prc.children) > i {
+		return prc.children[i]
+	}
+
+	return nil
+}
+
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+	if childType == nil {
+		return prc.GetChild(i).(RuleContext)
+	}
+
+	for j := 0; j < len(prc.children); j++ {
+		child := prc.children[j]
+		if reflect.TypeOf(child) == childType {
+			if i == 0 {
+				return child.(RuleContext)
+			}
+
+			i--
+		}
+	}
+
+	return nil
+}
+
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+	return TreesStringTree(prc, ruleNames, recog)
+}
+
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+	return prc
+}
+
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+	return visitor.VisitChildren(prc)
+}
+
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+	prc.start = t
+}
+
+func (prc *BaseParserRuleContext) GetStart() Token {
+	return prc.start
+}
+
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+	prc.stop = t
+}
+
+func (prc *BaseParserRuleContext) GetStop() Token {
+	return prc.stop
+}
+
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+	for j := 0; j < len(prc.children); j++ {
+		child := prc.children[j]
+		if c2, ok := child.(TerminalNode); ok {
+			if c2.GetSymbol().GetTokenType() == ttype {
+				if i == 0 {
+					return c2
+				}
+
+				i--
+			}
+		}
+	}
+	return nil
+}
+
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+	if prc.children == nil {
+		return make([]TerminalNode, 0)
+	}
+
+	tokens := make([]TerminalNode, 0)
+
+	for j := 0; j < len(prc.children); j++ {
+		child := prc.children[j]
+		if tchild, ok := child.(TerminalNode); ok {
+			if tchild.GetSymbol().GetTokenType() == ttype {
+				tokens = append(tokens, tchild)
+			}
+		}
+	}
+
+	return tokens
+}
+
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+	return prc
+}
+
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+	if prc.children == nil || i < 0 || i >= len(prc.children) {
+		return nil
+	}
+
+	j := -1 // what element have we found with ctxType?
+	for _, o := range prc.children {
+
+		childType := reflect.TypeOf(o)
+
+		if childType.Implements(ctxType) {
+			j++
+			if j == i {
+				return o.(RuleContext)
+			}
+		}
+	}
+	return nil
+}
+
+// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+	return prc.getChild(ctxType, i)
+}
+
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+	if prc.children == nil {
+		return make([]RuleContext, 0)
+	}
+
+	contexts := make([]RuleContext, 0)
+
+	for _, child := range prc.children {
+		childType := reflect.TypeOf(child)
+
+		if childType.ConvertibleTo(ctxType) {
+			contexts = append(contexts, child.(RuleContext))
+		}
+	}
+	return contexts
+}
+
+func (prc *BaseParserRuleContext) GetChildCount() int {
+	if prc.children == nil {
+		return 0
+	}
+
+	return len(prc.children)
+}
+
+func (prc *BaseParserRuleContext) GetSourceInterval() Interval {
+	if prc.start == nil || prc.stop == nil {
+		return TreeInvalidInterval
+	}
+
+	return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+// need to manage circular dependencies, so export now
+
+// Print out a whole tree, not just a node, in LISP format
+// (root child1 .. childN). Print just a node if this is a leaf.
+//
+
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+	var p ParserRuleContext = prc
+	s := "["
+	for p != nil && p != stop {
+		if ruleNames == nil {
+			if !p.IsEmpty() {
+				s += strconv.Itoa(p.GetInvokingState())
+			}
+		} else {
+			ri := p.GetRuleIndex()
+			var ruleName string
+			if ri >= 0 && ri < len(ruleNames) {
+				ruleName = ruleNames[ri]
+			} else {
+				ruleName = strconv.Itoa(ri)
+			}
+			s += ruleName
+		}
+		if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+			s += " "
+		}
+		pi := p.GetParent()
+		if pi != nil {
+			p = pi.(ParserRuleContext)
+		} else {
+			p = nil
+		}
+	}
+	s += "]"
+	return s
+}
+
+func (prc *BaseParserRuleContext) SetParent(v Tree) {
+	if v == nil {
+		prc.parentCtx = nil
+	} else {
+		prc.parentCtx = v.(RuleContext)
+	}
+}
+
+func (prc *BaseParserRuleContext) GetInvokingState() int {
+	return prc.invokingState
+}
+
+func (prc *BaseParserRuleContext) SetInvokingState(t int) {
+	prc.invokingState = t
+}
+
+func (prc *BaseParserRuleContext) GetRuleIndex() int {
+	return prc.RuleIndex
+}
+
+func (prc *BaseParserRuleContext) GetAltNumber() int {
+	return ATNInvalidAltNumber
+}
+
+func (prc *BaseParserRuleContext) SetAltNumber(_ int) {}
+
+// IsEmpty returns true if this context is empty.
+//
+// A context is empty if there is no invoking state, meaning that no rule
+// invoked the current context.
+func (prc *BaseParserRuleContext) IsEmpty() bool {
+	return prc.invokingState == -1
+}
+
+// GetParent returns the parent context of this node.
+//
+// The parent is the [RuleContext] that invoked the rule associated with this
+// context, or nil if this context has no parent (i.e. it is the root of the
+// parse tree).
+func (prc *BaseParserRuleContext) GetParent() Tree {
+	return prc.parentCtx
+}
+
+var ParserRuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+	ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+	*BaseParserRuleContext
+}
+
+//goland:noinspection GoUnusedExportedFunction
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+	prc := new(BaseInterpreterRuleContext)
+
+	prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+	prc.RuleIndex = ruleIndex
+
+	return prc
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
new file mode 100644
index 00000000..a1d5186b
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context.go
@@ -0,0 +1,727 @@
+// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"strconv"
+)
+
+var _emptyPredictionContextHash int
+
+func init() {
+	_emptyPredictionContextHash = murmurInit(1)
+	_emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+	return _emptyPredictionContextHash
+}
+
+const (
+	// BasePredictionContextEmptyReturnState represents {@code $} in an array in full context mode; here $
+	// doesn't mean wildcard:
+	//
+	//	$ + x = [$,x]
+	//
+	// Here,
+	//
+	//	$ = EmptyReturnState
+	BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// TODO: JI These are meant to be atomics - this does not seem to match the Java runtime here
+//
+//goland:noinspection GoUnusedGlobalVariable
+var (
+	BasePredictionContextglobalNodeCount = 1
+	BasePredictionContextid             = BasePredictionContextglobalNodeCount
+)
+
+const (
+	PredictionContextEmpty = iota
+	PredictionContextSingleton
+	PredictionContextArray
+)
+
+// PredictionContext is a Go-idiomatic implementation of PredictionContext that does not try to
+// emulate inheritance from Java, and can be used without an interface definition. An interface
+// is not required because no user code will ever need to implement this interface.
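+//
+// As a minimal sketch (illustrative state numbers only, not part of the
+// runtime), the three node kinds named by the pcType constants above are
+// built via the constructors that follow:
+//
+//	empty := NewEmptyPredictionContext()                      // $ (PredictionContextEmpty)
+//	single := SingletonBasePredictionContextCreate(empty, 5)  // one return state (PredictionContextSingleton)
+//	array := NewArrayPredictionContext(
+//		[]*PredictionContext{single, single}, []int{5, 7})    // several return states (PredictionContextArray)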
+type PredictionContext struct { + cachedHash int + pcType int + parentCtx *PredictionContext + returnState int + parents []*PredictionContext + returnStates []int +} + +func NewEmptyPredictionContext() *PredictionContext { + nep := &PredictionContext{} + nep.cachedHash = calculateEmptyHash() + nep.pcType = PredictionContextEmpty + nep.returnState = BasePredictionContextEmptyReturnState + return nep +} + +func NewBaseSingletonPredictionContext(parent *PredictionContext, returnState int) *PredictionContext { + pc := &PredictionContext{} + pc.pcType = PredictionContextSingleton + pc.returnState = returnState + pc.parentCtx = parent + if parent != nil { + pc.cachedHash = calculateHash(parent, returnState) + } else { + pc.cachedHash = calculateEmptyHash() + } + return pc +} + +func SingletonBasePredictionContextCreate(parent *PredictionContext, returnState int) *PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func NewArrayPredictionContext(parents []*PredictionContext, returnStates []int) *PredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. + hash := murmurInit(1) + for _, parent := range parents { + hash = murmurUpdate(hash, parent.Hash()) + } + for _, returnState := range returnStates { + hash = murmurUpdate(hash, returnState) + } + hash = murmurFinish(hash, len(parents)<<1) + + nec := &PredictionContext{} + nec.cachedHash = hash + nec.pcType = PredictionContextArray + nec.parents = parents + nec.returnStates = returnStates + return nec +} + +func (p *PredictionContext) Hash() int { + return p.cachedHash +} + +func (p *PredictionContext) Equals(other Collectable[*PredictionContext]) bool { + if p == other { + return true + } + switch p.pcType { + case PredictionContextEmpty: + otherP := other.(*PredictionContext) + return other == nil || otherP == nil || otherP.isEmpty() + case PredictionContextSingleton: + return p.SingletonEquals(other) + case PredictionContextArray: + return p.ArrayEquals(other) + } + return false +} + +func (p *PredictionContext) ArrayEquals(o Collectable[*PredictionContext]) bool { + if o == nil { + return false + } + other := o.(*PredictionContext) + if other == nil || other.pcType != PredictionContextArray { + return false + } + if p.cachedHash != other.Hash() { + return false // can't be same if hash is different + } + + // Must compare the actual array elements and not just the array address + // + return intSlicesEqual(p.returnStates, other.returnStates) && + pcSliceEqual(p.parents, other.parents) +} + +func (p *PredictionContext) SingletonEquals(other Collectable[*PredictionContext]) bool { + if other == nil { + return false + } + otherP := other.(*PredictionContext) + if otherP == nil || otherP.pcType != PredictionContextSingleton { + return false + } + + if p.cachedHash != otherP.Hash() { + return false // Can't be same if hash is different + } + + if p.returnState != otherP.getReturnState(0) { + return false + } + + // Both parents must be nil if one is + if p.parentCtx == nil { + return otherP.parentCtx == nil + } + + return p.parentCtx.Equals(otherP.parentCtx) +} + +func (p *PredictionContext) GetParent(i int) *PredictionContext { + switch p.pcType { + case 
PredictionContextEmpty: + return nil + case PredictionContextSingleton: + return p.parentCtx + case PredictionContextArray: + return p.parents[i] + } + return nil +} + +func (p *PredictionContext) getReturnState(i int) int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates[i] + default: + return p.returnState + } +} + +func (p *PredictionContext) GetReturnStates() []int { + switch p.pcType { + case PredictionContextArray: + return p.returnStates + default: + return []int{p.returnState} + } +} + +func (p *PredictionContext) length() int { + switch p.pcType { + case PredictionContextArray: + return len(p.returnStates) + default: + return 1 + } +} + +func (p *PredictionContext) hasEmptyPath() bool { + switch p.pcType { + case PredictionContextSingleton: + return p.returnState == BasePredictionContextEmptyReturnState + } + return p.getReturnState(p.length()-1) == BasePredictionContextEmptyReturnState +} + +func (p *PredictionContext) String() string { + switch p.pcType { + case PredictionContextEmpty: + return "$" + case PredictionContextSingleton: + var up string + + if p.parentCtx == nil { + up = "" + } else { + up = p.parentCtx.String() + } + + if len(up) == 0 { + if p.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(p.returnState) + } + + return strconv.Itoa(p.returnState) + " " + up + case PredictionContextArray: + if p.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(p.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if p.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(p.returnStates[i]) + if !p.parents[i].isEmpty() { + s = s + " " + p.parents[i].String() + } else { + s = s + "nil" + } + } + return s + "]" + + default: + return "unknown" + } +} + +func (p *PredictionContext) isEmpty() bool { + switch p.pcType { + case PredictionContextEmpty: + return true + case PredictionContextArray: + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return p.returnStates[0] == BasePredictionContextEmptyReturnState + default: + return false + } +} + +func (p *PredictionContext) Type() int { + return p.pcType +} + +func calculateHash(parent *PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.Hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) *PredictionContext { + if outerContext == nil { + outerContext = ParserRuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. 
(if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == ParserRuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + + // Share same graph if both same + // + if a == b || a.Equals(b) { + return a + } + + if a.pcType == PredictionContextSingleton && b.pcType == PredictionContextSingleton { + return mergeSingletons(a, b, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as wildcard + if rootIsWildcard { + if a.isEmpty() { + return a + } + if b.isEmpty() { + return b + } + } + + // Convert either Singleton or Empty to arrays, so that we can merge them + // + ara := convertToArray(a) + arb := convertToArray(b) + return mergeArrays(ara, arb, rootIsWildcard, mergeCache) +} + +func convertToArray(pc *PredictionContext) *PredictionContext { + switch pc.Type() { + case PredictionContextEmpty: + return NewArrayPredictionContext([]*PredictionContext{}, []int{}) + case PredictionContextSingleton: + return NewArrayPredictionContext([]*PredictionContext{pc.GetParent(0)}, []int{pc.getReturnState(0)}) + default: + // Already an array + } + return pc +} + +// mergeSingletons merges two Singleton [PredictionContext] instances. +// +// Stack tops equal, parents merge is same return left graph. +//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
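+//
+// For instance, a minimal sketch of the shared-parent case (illustrative
+// return states only): merging singletons with return states 1 and 2 that
+// share the same parent x yields the array node
+//
+//	[1, 2] with parents [x, x]
+//
+// exactly as built by the singleParent branch in the code below.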
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + return previous + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.Put(a, b, rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent.Equals(a.parentCtx) { + return a // ax + bx = ax, if a=b + } + if parent.Equals(b.parentCtx) { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array. + // New joined parent so create a new singleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.Put(a, b, spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent *PredictionContext + if a.Equals(b) || (a.parentCtx != nil && a.parentCtx.Equals(b.parentCtx)) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []*PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []*PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []*PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.Put(a, b, apc) + } + return apc +} + +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//
+// # Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is a superset of any graph; return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so the merged parent is
+// {@code //EMPTY}; return left graph.
+//
+// Special case of last merge if local context.
+//
+// # Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts; {@link //EMPTY} in array is a special value (and
+// nil parent).
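+//
+// A compact sketch of these cases (illustrative only, using $ for
+// {@link //EMPTY} and matching the branches in the code below):
+//
+//	rootIsWildcard=true:  $ + x = $  (left graph wins)
+//	rootIsWildcard=false: $ + $ = $
+//	rootIsWildcard=false: $ + x = [x, $]  ($ kept as a special array entry)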
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b *PredictionContext, rootIsWildcard bool) *PredictionContext { + if rootIsWildcard { + if a.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // // + b =// + } + if b.pcType == PredictionContextEmpty { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a.isEmpty() && b.isEmpty() { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a.isEmpty() { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b.isEmpty() { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []*PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// Merge two {@link ArrayBasePredictionContext} instances. +// +//
+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
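+//
+// A small illustrative sketch (made-up state numbers, not part of the
+// runtime): merging the arrays
+//
+//	a = [1 x, $]
+//	b = [1 y, 7 z]
+//
+// merges the parents of the equal top 1 into x' = merge(x, y), interleaves
+// the remaining return states in sorted order ($ always sorts last), and
+// yields
+//
+//	M = [1 x', 7 z, $]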
+// +//goland:noinspection GoBoolExpressions +func mergeArrays(a, b *PredictionContext, rootIsWildcard bool, mergeCache *JPCMap) *PredictionContext { + if mergeCache != nil { + previous, present := mergeCache.Get(a, b) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + previous, present = mergeCache.Get(b, a) + if present { + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> previous") + } + return previous + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]*PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := aParent != nil && bParent != nil && aParent.Equals(bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.Put(a, b, pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: JI track whether this is possible above during merge sort for speed and possibly avoid an allocation + if M.Equals(a) { + if mergeCache != nil { + mergeCache.Put(a, b, a) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> a") + } + return a + } + if M.Equals(b) { + if mergeCache != nil { + mergeCache.Put(a, b, b) + } + if runtimeConfig.parserATNSimulatorTraceATNSim { + fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> b") + } + 
return b
+	}
+	combineCommonParents(&mergedParents)
+
+	if mergeCache != nil {
+		mergeCache.Put(a, b, M)
+	}
+	if runtimeConfig.parserATNSimulatorTraceATNSim {
+		fmt.Println("mergeArrays a=" + a.String() + ",b=" + b.String() + " -> " + M.String())
+	}
+	return M
+}
+
+// Make a pass over all M parents and merge any Equals() ones.
+// Note that we pass a pointer to the slice as we want to modify it in place.
+//
+//goland:noinspection GoUnusedFunction
+func combineCommonParents(parents *[]*PredictionContext) {
+	uniqueParents := NewJStore[*PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCollection, "combineCommonParents for PredictionContext")
+
+	for p := 0; p < len(*parents); p++ {
+		parent := (*parents)[p]
+		_, _ = uniqueParents.Put(parent)
+	}
+	for q := 0; q < len(*parents); q++ {
+		pc, _ := uniqueParents.Get((*parents)[q])
+		(*parents)[q] = pc
+	}
+}
+
+func getCachedBasePredictionContext(context *PredictionContext, contextCache *PredictionContextCache, visited *VisitRecord) *PredictionContext {
+	if context.isEmpty() {
+		return context
+	}
+	existing, present := visited.Get(context)
+	if present {
+		return existing
+	}
+
+	existing, present = contextCache.Get(context)
+	if present {
+		visited.Put(context, existing)
+		return existing
+	}
+	changed := false
+	parents := make([]*PredictionContext, context.length())
+	for i := 0; i < len(parents); i++ {
+		parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+		if changed || !parent.Equals(context.GetParent(i)) {
+			if !changed {
+				parents = make([]*PredictionContext, context.length())
+				for j := 0; j < context.length(); j++ {
+					parents[j] = context.GetParent(j)
+				}
+				changed = true
+			}
+			parents[i] = parent
+		}
+	}
+	if !changed {
+		contextCache.add(context)
+		visited.Put(context, context)
+		return context
+	}
+	var updated *PredictionContext
+	if len(parents) == 0 {
+		updated = BasePredictionContextEMPTY
+	} else if len(parents) == 1 {
+		updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+	} else {
+		updated = NewArrayPredictionContext(parents, context.GetReturnStates())
+	}
+	contextCache.add(updated)
+	visited.Put(updated, updated)
+	visited.Put(context, updated)
+
+	return updated
+}
diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
new file mode 100644
index 00000000..25dfb11e
--- /dev/null
+++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_context_cache.go
@@ -0,0 +1,48 @@
+package antlr
+
+var BasePredictionContextEMPTY = &PredictionContext{
+	cachedHash:  calculateEmptyHash(),
+	pcType:      PredictionContextEmpty,
+	returnState: BasePredictionContextEmptyReturnState,
+}
+
+// PredictionContextCache is used to cache [PredictionContext] objects. It is used for the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+type PredictionContextCache struct {
+	cache *JMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]]
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+	return &PredictionContextCache{
+		cache: NewJMap[*PredictionContext, *PredictionContext, Comparator[*PredictionContext]](pContextEqInst, PredictionContextCacheCollection, "NewPredictionContextCache()"),
+	}
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a new context to the cache.
+// Protect shared cache from unsafe thread access. +func (p *PredictionContextCache) add(ctx *PredictionContext) *PredictionContext { + if ctx.isEmpty() { + return BasePredictionContextEMPTY + } + + // Put will return the existing entry if it is present (note this is done via Equals, not whether it is + // the same pointer), otherwise it will add the new entry and return that. + // + existing, present := p.cache.Get(ctx) + if present { + return existing + } + p.cache.Put(ctx, ctx) + return ctx +} + +func (p *PredictionContextCache) Get(ctx *PredictionContext) (*PredictionContext, bool) { + pc, exists := p.cache.Get(ctx) + return pc, exists +} + +func (p *PredictionContextCache) length() int { + return p.cache.Len() +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go new file mode 100644 index 00000000..3f85a6a5 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/prediction_mode.go @@ -0,0 +1,536 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // PredictionModeSLL represents the SLL(*) prediction mode. + // This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + // When using this prediction mode, the parser will either return a correct + // parse tree (i.e. the same parse tree that would be returned with the + // [PredictionModeLL] prediction mode), or it will Report a syntax error. If a + // syntax error is encountered when using the SLL prediction mode, + // it may be due to either an actual syntax error in the input or indicate + // that the particular combination of grammar and input requires the more + // powerful LL prediction abilities to complete successfully. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeSLL = 0 + + // PredictionModeLL represents the LL(*) prediction mode. + // This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + // When using this prediction mode, the parser will make correct decisions + // for all syntactically-correct grammar and input combinations. However, in + // cases where the grammar is truly ambiguous this prediction mode might not + // report a precise answer for exactly which alternatives are + // ambiguous. + // + // This prediction mode does not provide any guarantees for prediction + // behavior for syntactically-incorrect inputs. + // + PredictionModeLL = 1 + + // PredictionModeLLExactAmbigDetection represents the LL(*) prediction mode + // with exact ambiguity detection. 
+	//
+	// In addition to the correctness guarantees provided by the [PredictionModeLL] prediction mode,
+	// this prediction mode instructs the prediction algorithm to determine the
+	// complete and exact set of ambiguous alternatives for every ambiguous
+	// decision encountered while parsing.
+	//
+	// This prediction mode may be used for diagnosing ambiguities during
+	// grammar development. Due to the performance overhead of calculating sets
+	// of ambiguous alternatives, this prediction mode should be avoided when
+	// the exact results are not necessary.
+	//
+	// This prediction mode does not provide any guarantees for prediction
+	// behavior for syntactically-incorrect inputs.
+	//
+	PredictionModeLLExactAmbigDetection = 2
+)
+
+// PredictionModehasSLLConflictTerminatingPrediction computes the SLL prediction termination condition.
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases:
+//
+//   - The usual SLL+LL fallback upon SLL conflict
+//   - Pure SLL without LL fallback
+//
+// # Combined SLL+LL Parsing
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative, e.g.
+//
+//	{1,2} and {3,4}
+//
+// If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// # Heuristic
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+//	[12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ;
+//
+// When the [ATN] simulation reaches the state before ';', it has a
+// [DFA] state that looks like:
+//
+//	[12|1|[], 6|2|[], 12|2|[]]
+//
+// Naturally
+//
+//	12|1|[] and 12|2|[]
+//
+// conflict, but we cannot stop processing this node because alternative two
+// has another way to continue, via
+//
+//	[6|2|[]]
+//
+// It also lets us continue for this rule:
+//
+//	[1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;
+//
+// After matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state immediately before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue, and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+// +// # Pure SLL Parsing +// +// To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic. +// +// # Predicates in SLL+LL Parsing +// +// SLL decisions don't evaluate predicates until after they reach [DFA] stop +// states because they need to create the [DFA] cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation, so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates. +// +// Implementation-wise, [ATNConfigSet] combines stack contexts but not +// semantic predicate contexts, so we might see two configurations like the +// following: +// +// (s, 1, x, {}), (s, 1, x', {p}) +// +// Before testing these configurations against others, we have to merge +// x and x' (without modifying the existing configurations). +// For example, we test (x+x')==x” when looking for conflicts in +// the following configurations: +// +// (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x”, {}) +// +// If the configuration set has predicates (as indicated by +// [ATNConfigSet.hasSemanticContext]), this algorithm makes a copy of +// the configurations to strip out all the predicates so that a standard +// [ATNConfigSet] will merge everything ignoring predicates. +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs *ATNConfigSet) bool { + + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input, so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.hasSemanticContext { + // dup configs, tossing out semantic predicates + dup := NewATNConfigSet(false) + for _, c := range configs.configs { + + // NewATNConfig({semanticContext:}, c) + c = NewATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar predicates + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// PredictionModehasConfigInRuleStopState checks if any configuration in the given configs is in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// The func returns true if any configuration in the supplied configs is in a [RuleStopState] +func PredictionModehasConfigInRuleStopState(configs *ATNConfigSet) bool { + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// PredictionModeallConfigsInRuleStopStates checks if all configurations in configs are in a +// [RuleStopState]. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). 
+// +// the func returns true if all configurations in configs are in a +// [RuleStopState] +func PredictionModeallConfigsInRuleStopStates(configs *ATNConfigSet) bool { + + for _, c := range configs.configs { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// PredictionModeresolvesToJustOneViableAlt checks full LL prediction termination. +// +// Can we stop looking ahead during [ATN] simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict. +// +// The basic idea is to split the set of configurations 'C', into +// conflicting subsets (s, _, ctx, _) and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical [ATNConfig].state and [ATNConfig].context values +// but a different [ATNConfig].alt value, e.g. +// +// (s, i, ctx, _) +// +// and +// +// (s, j, ctx, _) ; for i != j +// +// Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows: +// +// A_s,ctx = {i | (s, i, ctx, _)} +// +// for each configuration in C holding s and ctx fixed. +// +// Or in pseudo-code: +// +// for each configuration c in C: +// map[c] U = c.ATNConfig.alt alt // map hash/equals uses s and x, not alt and not pred +// +// The values in map are the set of +// +// A_s,ctx +// +// sets. +// +// If +// +// |A_s,ctx| = 1 +// +// then there is no conflict associated with s and ctx. +// +// Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// further lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round. +// +// The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal; you will still have the conflict. It's just inefficient. It +// might even look until the end of file. +// +// No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check. +// +// # Conflicting Configs +// +// Two configurations: +// +// (s, i, x) and (s, j, x') +// +// conflict when i != j but x = x'. Because we merge all +// (s, i, _) configurations together, that means that there are at +// most n configurations associated with state s for +// n possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts x and x'. +// +// Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either x or x'. 
+// If the x associated with lowest alternative i +// is the superset, then i is the only possible prediction since the +// others resolve to min(i) as well. However, if x is +// associated with j > i then at least one stack configuration for +// j is not in conflict with alternative i. The algorithm +// should keep going, looking for more lookahead due to the uncertainty. +// +// For simplicity, I'm doing an equality check between x and +// x', which lets the algorithm continue to consume lookahead longer +// than necessary. The reason I like the equality is of course the +// simplicity but also because that is the test you need to detect the +// alternatives that are actually in conflict. +// +// # Continue/Stop Rule +// +// Continue if the union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict. +// +// The complete set of alternatives, +// +// [i for (_, i, _)] +// +// tells us which alternatives are still in the running for the amount of input we've +// consumed at this point. The conflicting sets let us to strip away +// configurations that won't lead to more states because we resolve +// conflicts to the configuration with a minimum alternate for the +// conflicting set. +// +// Cases +// +// - no conflicts and more than 1 alternative in set => continue +// - (s, 1, x), (s, 2, x), (s, 3, z), (s', 1, y), (s', 2, y) yields non-conflicting set +// {3} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1,3} => continue +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y), (s”, 1, z) yields non-conflicting set +// {1} ∪ conflicting sets min({1,2}) ∪ min({1,2}) = {1} => stop and predict 1 +// - (s, 1, x), (s, 2, x), (s', 1, y), (s', 2, y) yields conflicting, reduced sets +// {1} ∪ {1} = {1} => stop and predict 1, can announce ambiguity {1,2} +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 2, y), (s', 3, y) yields conflicting, reduced sets +// {1} ∪ {2} = {1,2} => continue +// - (s, 1, x), (s, 2, x), (s', 3, y), (s', 4, y) yields conflicting, reduced sets +// {1} ∪ {3} = {1,3} => continue +// +// # Exact Ambiguity Detection +// +// If all states report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set: +// +// |A_i| > 1 +// +// and +// +// A_i = A_j ; for all i, j +// +// In other words, we continue examining lookahead until all A_i +// have more than one alternative and all A_i are the same. If +// +// A={{1,2}, {1,3}} +// +// then regular LL prediction would terminate because the resolved set is {1}. +// To determine what the real ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like: +// +// A={{1,2}} +// +// or +// +// {{1,2},{1,2}}, etc... +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// PredictionModeallSubsetsConflict determines if every alternative subset in altsets contains more +// than one alternative. 
+// +// The func returns true if every [BitSet] in altsets has +// [BitSet].cardinality cardinality > 1 +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// PredictionModehasNonConflictingAltSet determines if any single alternative subset in altsets contains +// exactly one alternative. +// +// The func returns true if altsets contains at least one [BitSet] with +// [BitSet].cardinality cardinality 1 +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// PredictionModehasConflictingAltSet determines if any single alternative subset in altsets contains +// more than one alternative. +// +// The func returns true if altsets contains a [BitSet] with +// [BitSet].cardinality cardinality > 1, otherwise false +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// PredictionModeallSubsetsEqual determines if every alternative subset in altsets is equivalent. +// +// The func returns true if every member of altsets is equal to the others. +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// PredictionModegetUniqueAlt returns the unique alternative predicted by all alternative subsets in +// altsets. If no such alternative exists, this method returns +// [ATNInvalidAltNumber]. +// +// @param altsets a collection of alternative subsets +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// PredictionModeGetAlts returns the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each [BitSet] +// in altsets, being the set of represented alternatives in altsets. +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// PredictionModegetConflictingAltSubsets gets the conflicting alt subsets from a configuration set. +// +// for each configuration c in configs: +// map[c] U= c.ATNConfig.alt // map hash/equals uses s and x, not alt and not pred +func PredictionModegetConflictingAltSubsets(configs *ATNConfigSet) []*BitSet { + configToAlts := NewJMap[*ATNConfig, *BitSet, *ATNAltConfigComparator[*ATNConfig]](atnAltCfgEqInst, AltSetCollection, "PredictionModegetConflictingAltSubsets()") + + for _, c := range configs.configs { + + alts, ok := configToAlts.Get(c) + if !ok { + alts = NewBitSet() + configToAlts.Put(c, alts) + } + alts.add(c.GetAlt()) + } + + return configToAlts.Values() +} + +// PredictionModeGetStateToAltMap gets a map from state to alt subset from a configuration set. 
+// +// for each configuration c in configs: +// map[c.ATNConfig.state] U= c.ATNConfig.alt} +func PredictionModeGetStateToAltMap(configs *ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.configs { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs *ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +// PredictionModegetSingleViableAlt gets the single alternative predicted by all alternative subsets in altsets +// if there is one. +// +// TODO: JI - Review this code - it does not seem to do the same thing as the Java code - maybe because [BitSet] is not like the Java utils BitSet +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/recognizer.go b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go new file mode 100644 index 00000000..dcb8548c --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/recognizer.go @@ -0,0 +1,241 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener + HasError() bool + GetError() RecognitionException + SetError(RecognitionException) +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string + SynErr RecognitionException +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +//goland:noinspection GoUnusedGlobalVariable +var tokenTypeMapCache = make(map[string]int) + +//goland:noinspection GoUnusedGlobalVariable +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.13.1" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) SetError(err RecognitionException) { + b.SynErr = err +} + +func (b *BaseRecognizer) HasError() bool { + return b.SynErr != nil +} + +func (b *BaseRecognizer) GetError() RecognitionException { + return b.SynErr +} + +func (b *BaseRecognizer) Action(_ RuleContext, _, _ int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners = append(b.listeners, listener) +} 
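+
+// Illustrative sketch (not part of the upstream runtime): a minimal custom
+// listener can embed [DefaultErrorListener], override SyntaxError, and be
+// attached via AddErrorListener above. The name countingErrorListener is
+// hypothetical.
+//
+//	type countingErrorListener struct {
+//		*DefaultErrorListener
+//		count int
+//	}
+//
+//	func (l *countingErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{},
+//		line, column int, msg string, e RecognitionException) {
+//		l.count++ // tally each syntax error reported during a parse
+//	}
+//
+//	// usage: rec.RemoveErrorListeners(); rec.AddErrorListener(&countingErrorListener{})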
+ +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result = tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// GetRuleIndexMap Get a map from rule names to rule indexes. +// +// Used for XPath and tree pattern compilation. +// +// TODO: JI This is not yet implemented in the Go runtime. Maybe not needed. +func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +// GetTokenType get the token type based upon its name +func (b *BaseRecognizer) GetTokenType(_ string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// GetErrorHeader returns the error header, normally line/character position information. +// +// Can be overridden in sub structs embedding BaseRecognizer. +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// GetTokenErrorDisplay shows how a token should be displayed in an error message. +// +// The default is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. 
+// +// Deprecated: This method is not called by the ANTLR 4 Runtime. Specific +// implementations of [ANTLRErrorStrategy] may provide a similar +// feature when necessary. For example, see [DefaultErrorStrategy].GetTokenErrorDisplay() +func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + + return "'" + s + "'" +} + +func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { + return NewProxyErrorListener(b.listeners) +} + +// Sempred embedding structs need to override this if there are sempreds or actions +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Sempred(_ RuleContext, _ int, _ int) bool { + return true +} + +// Precpred embedding structs need to override this if there are preceding predicates +// that the ATN interpreter needs to execute +func (b *BaseRecognizer) Precpred(_ RuleContext, _ int) bool { + return true +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/rule_context.go b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go new file mode 100644 index 00000000..f2ad0479 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/rule_context.go @@ -0,0 +1,40 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// RuleContext is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. +// +// We actually carry no information about the rule associated with this context (except +// when parsing). We keep only the state number of the invoking state from +// the [ATN] submachine that invoked this. Contrast this with the s +// pointer inside [ParserRuleContext] that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the struct +// [ParserRuleContext], which embeds a RuleContext. +// +// @see ParserRuleContext +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go new file mode 100644 index 00000000..68cb9061 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/semantic_context.go @@ -0,0 +1,464 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// SemanticContext is a tree structure used to record the semantic context in which +// +// an ATN configuration is valid. 
It's either a single predicate, +// a conjunction p1 && p2, or a sum of products p1 || p2. +// +// I have scoped the AND, OR, and Predicate subclasses of +// [SemanticContext] within the scope of this outer ``class'' +type SemanticContext interface { + Equals(other Collectable[SemanticContext]) bool + Hash() int + + evaluate(parser Recognizer, outerContext RuleContext) bool + evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext + + String() string +} + +func SemanticContextandContext(a, b SemanticContext) SemanticContext { + if a == nil || a == SemanticContextNone { + return b + } + if b == nil || b == SemanticContextNone { + return a + } + result := NewAND(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +func SemanticContextorContext(a, b SemanticContext) SemanticContext { + if a == nil { + return b + } + if b == nil { + return a + } + if a == SemanticContextNone || b == SemanticContextNone { + return SemanticContextNone + } + result := NewOR(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +type Predicate struct { + ruleIndex int + predIndex int + isCtxDependent bool +} + +func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { + p := new(Predicate) + + p.ruleIndex = ruleIndex + p.predIndex = predIndex + p.isCtxDependent = isCtxDependent // e.g., $i ref in pred + return p +} + +//The default {@link SemanticContext}, which is semantically equivalent to +//a predicate of the form {@code {true}?}. + +var SemanticContextNone = NewPredicate(-1, -1, false) + +func (p *Predicate) evalPrecedence(_ Recognizer, _ RuleContext) SemanticContext { + return p +} + +func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + + var localctx RuleContext + + if p.isCtxDependent { + localctx = outerContext + } + + return parser.Sempred(localctx, p.ruleIndex, p.predIndex) +} + +func (p *Predicate) Equals(other Collectable[SemanticContext]) bool { + if p == other { + return true + } else if _, ok := other.(*Predicate); !ok { + return false + } else { + return p.ruleIndex == other.(*Predicate).ruleIndex && + p.predIndex == other.(*Predicate).predIndex && + p.isCtxDependent == other.(*Predicate).isCtxDependent + } +} + +func (p *Predicate) Hash() int { + h := murmurInit(0) + h = murmurUpdate(h, p.ruleIndex) + h = murmurUpdate(h, p.predIndex) + if p.isCtxDependent { + h = murmurUpdate(h, 1) + } else { + h = murmurUpdate(h, 0) + } + return murmurFinish(h, 3) +} + +func (p *Predicate) String() string { + return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" 
+} + +type PrecedencePredicate struct { + precedence int +} + +func NewPrecedencePredicate(precedence int) *PrecedencePredicate { + + p := new(PrecedencePredicate) + p.precedence = precedence + + return p +} + +func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + return parser.Precpred(outerContext, p.precedence) +} + +func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + if parser.Precpred(outerContext, p.precedence) { + return SemanticContextNone + } + + return nil +} + +func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { + return p.precedence - other.precedence +} + +func (p *PrecedencePredicate) Equals(other Collectable[SemanticContext]) bool { + + var op *PrecedencePredicate + var ok bool + if op, ok = other.(*PrecedencePredicate); !ok { + return false + } + + if p == op { + return true + } + + return p.precedence == other.(*PrecedencePredicate).precedence +} + +func (p *PrecedencePredicate) Hash() int { + h := uint32(1) + h = 31*h + uint32(p.precedence) + return int(h) +} + +func (p *PrecedencePredicate) String() string { + return "{" + strconv.Itoa(p.precedence) + ">=prec}?" +} + +func PrecedencePredicatefilterPrecedencePredicates(set *JStore[SemanticContext, Comparator[SemanticContext]]) []*PrecedencePredicate { + result := make([]*PrecedencePredicate, 0) + + set.Each(func(v SemanticContext) bool { + if c2, ok := v.(*PrecedencePredicate); ok { + result = append(result, c2) + } + return true + }) + + return result +} + +// A semantic context which is true whenever none of the contained contexts +// is false.` + +type AND struct { + opnds []SemanticContext +} + +func NewAND(a, b SemanticContext) *AND { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewAND() operands") + if aa, ok := a.(*AND); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*AND); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence < reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + opnds := make([]SemanticContext, len(vs)) + copy(opnds, vs) + + and := new(AND) + and.opnds = opnds + + return and +} + +func (a *AND) Equals(other Collectable[SemanticContext]) bool { + if a == other { + return true + } + if _, ok := other.(*AND); !ok { + return false + } else { + for i, v := range other.(*AND).opnds { + if !a.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +// {@inheritDoc} +// +//
+// The evaluation of predicates by this context is short-circuiting, but
+// unordered.
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(a.opnds); i++ { + if !a.opnds[i].evaluate(parser, outerContext) { + return false + } + } + return true +} + +func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + + for i := 0; i < len(a.opnds); i++ { + context := a.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == nil { + // The AND context is false if any element is false + return nil + } else if evaluated != SemanticContextNone { + // Reduce the result by Skipping true elements + operands = append(operands, evaluated) + } + } + if !differs { + return a + } + + if len(operands) == 0 { + // all elements were true, so the AND context is true + return SemanticContextNone + } + + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextandContext(result, o) + } + } + + return result +} + +func (a *AND) Hash() int { + h := murmurInit(37) // Init with a value different from OR + for _, op := range a.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (o *OR) Hash() int { + h := murmurInit(41) // Init with o value different from AND + for _, op := range o.opnds { + h = murmurUpdate(h, op.Hash()) + } + return murmurFinish(h, len(o.opnds)) +} + +func (a *AND) String() string { + s := "" + + for _, o := range a.opnds { + s += "&& " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} + +// +// A semantic context which is true whenever at least one of the contained +// contexts is true. +// + +type OR struct { + opnds []SemanticContext +} + +func NewOR(a, b SemanticContext) *OR { + + operands := NewJStore[SemanticContext, Comparator[SemanticContext]](semctxEqInst, SemanticContextCollection, "NewOR() operands") + if aa, ok := a.(*OR); ok { + for _, o := range aa.opnds { + operands.Put(o) + } + } else { + operands.Put(a) + } + + if ba, ok := b.(*OR); ok { + for _, o := range ba.opnds { + operands.Put(o) + } + } else { + operands.Put(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence > reduced.precedence { + reduced = p + } + } + + operands.Put(reduced) + } + + vs := operands.Values() + + opnds := make([]SemanticContext, len(vs)) + copy(opnds, vs) + + o := new(OR) + o.opnds = opnds + + return o +} + +func (o *OR) Equals(other Collectable[SemanticContext]) bool { + if o == other { + return true + } else if _, ok := other.(*OR); !ok { + return false + } else { + for i, v := range other.(*OR).opnds { + if !o.opnds[i].Equals(v) { + return false + } + } + return true + } +} + +//
+// The evaluation of predicates by this context is short-circuiting, but
+// unordered.
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(o.opnds); i++ { + if o.opnds[i].evaluate(parser, outerContext) { + return true + } + } + return false +} + +func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + for i := 0; i < len(o.opnds); i++ { + context := o.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == SemanticContextNone { + // The OR context is true if any element is true + return SemanticContextNone + } else if evaluated != nil { + // Reduce the result by Skipping false elements + operands = append(operands, evaluated) + } + } + if !differs { + return o + } + if len(operands) == 0 { + // all elements were false, so the OR context is false + return nil + } + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextorContext(result, o) + } + } + + return result +} + +func (o *OR) String() string { + s := "" + + for _, o := range o.opnds { + s += "|| " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/statistics.go b/vendor/github.com/antlr4-go/antlr/v4/statistics.go new file mode 100644 index 00000000..8cb5f3ed --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/statistics.go @@ -0,0 +1,280 @@ +//go:build antlr.stats + +package antlr + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" + "strconv" +) + +// This file allows the user to collect statistics about the runtime of the ANTLR runtime. It is not enabled by default +// and so incurs no time penalty. To enable it, you must build the runtime with the antlr.stats build tag. +// + +// Tells various components to collect statistics - because it is only true when this file is included, it will +// allow the compiler to completely eliminate all the code that is only used when collecting statistics. +const collectStats = true + +// goRunStats is a collection of all the various data the ANTLR runtime has collected about a particular run. +// It is exported so that it can be used by others to look for things that are not already looked for in the +// runtime statistics. +type goRunStats struct { + + // jStats is a slice of all the [JStatRec] records that have been created, which is one for EVERY collection created + // during a run. It is exported so that it can be used by others to look for things that are not already looked for + // within this package. + // + jStats []*JStatRec + jStatsLock RWMutex + topN int + topNByMax []*JStatRec + topNByUsed []*JStatRec + unusedCollections map[CollectionSource]int + counts map[CollectionSource]int +} + +const ( + collectionsFile = "collections" +) + +var ( + Statistics = &goRunStats{ + topN: 10, + } +) + +type statsOption func(*goRunStats) error + +// Configure allows the statistics system to be configured as the user wants and override the defaults +func (s *goRunStats) Configure(options ...statsOption) error { + for _, option := range options { + err := option(s) + if err != nil { + return err + } + } + return nil +} + +// WithTopN sets the number of things to list in the report when we are concerned with the top N things. 
+// +// For example, if you want to see the top 20 collections by size, you can do: +// +// antlr.Statistics.Configure(antlr.WithTopN(20)) +func WithTopN(topN int) statsOption { + return func(s *goRunStats) error { + s.topN = topN + return nil + } +} + +// Analyze looks through all the statistical records and computes all the outputs that might be useful to the user. +// +// The function gathers and analyzes a number of statistics about any particular run of +// an ANTLR generated recognizer. In the vast majority of cases, the statistics are only +// useful to maintainers of ANTLR itself, but they can be useful to users as well. They may be +// especially useful in tracking down bugs or performance problems when an ANTLR user could +// supply the output from this package, but cannot supply the grammar file(s) they are using, even +// privately to the maintainers. +// +// The statistics are gathered by the runtime itself, and are not gathered by the parser or lexer, but the user +// must call this function their selves to analyze the statistics. This is because none of the infrastructure is +// extant unless the calling program is built with the antlr.stats tag like so: +// +// go build -tags antlr.stats . +// +// When a program is built with the antlr.stats tag, the Statistics object is created and available outside +// the package. The user can then call the [Statistics.Analyze] function to analyze the statistics and then call the +// [Statistics.Report] function to report the statistics. +// +// Please forward any questions about this package to the ANTLR discussion groups on GitHub or send to them to +// me [Jim Idle] directly at jimi@idle.ws +// +// [Jim Idle]: https:://github.com/jim-idle +func (s *goRunStats) Analyze() { + + // Look for anything that looks strange and record it in our local maps etc for the report to present it + // + s.CollectionAnomalies() + s.TopNCollections() +} + +// TopNCollections looks through all the statistical records and gathers the top ten collections by size. +func (s *goRunStats) TopNCollections() { + + // Let's sort the stat records by MaxSize + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].MaxSize > s.jStats[j].MaxSize + }) + + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByMax = append(s.topNByMax, s.jStats[i]) + } + + // Sort by the number of times used + // + sort.Slice(s.jStats, func(i, j int) bool { + return s.jStats[i].Gets+s.jStats[i].Puts > s.jStats[j].Gets+s.jStats[j].Puts + }) + for i := 0; i < len(s.jStats) && i < s.topN; i++ { + s.topNByUsed = append(s.topNByUsed, s.jStats[i]) + } +} + +// Report dumps a markdown formatted report of all the statistics collected during a run to the given dir output +// path, which should represent a directory. Generated files will be prefixed with the given prefix and will be +// given a type name such as `anomalies` and a time stamp such as `2021-09-01T12:34:56` and a .md suffix. 
+func (s *goRunStats) Report(dir string, prefix string) error { + + isDir, err := isDirectory(dir) + switch { + case err != nil: + return err + case !isDir: + return fmt.Errorf("output directory `%s` is not a directory", dir) + } + s.reportCollections(dir, prefix) + + // Clean out any old data in case the user forgets + // + s.Reset() + return nil +} + +func (s *goRunStats) Reset() { + s.jStats = nil + s.topNByUsed = nil + s.topNByMax = nil +} + +func (s *goRunStats) reportCollections(dir, prefix string) { + cname := filepath.Join(dir, ".asciidoctor") + // If the file doesn't exist, create it, or append to the file + f, err := os.OpenFile(cname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + _, _ = f.WriteString(`// .asciidoctorconfig +++++ + +++++`) + _ = f.Close() + + fname := filepath.Join(dir, prefix+"_"+"_"+collectionsFile+"_"+".adoc") + // If the file doesn't exist, create it, or append to the file + f, err = os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Fatal(err) + } + defer func(f *os.File) { + err := f.Close() + if err != nil { + log.Fatal(err) + } + }(f) + _, _ = f.WriteString("= Collections for " + prefix + "\n\n") + + _, _ = f.WriteString("== Summary\n") + + if s.unusedCollections != nil { + _, _ = f.WriteString("=== Unused Collections\n") + _, _ = f.WriteString("Unused collections incur a penalty for allocation that makes them a candidate for either\n") + _, _ = f.WriteString(" removal or optimization. If you are using a collection that is not used, you should\n") + _, _ = f.WriteString(" consider removing it. If you are using a collection that is used, but not very often,\n") + _, _ = f.WriteString(" you should consider using lazy initialization to defer the allocation until it is\n") + _, _ = f.WriteString(" actually needed.\n\n") + + _, _ = f.WriteString("\n.Unused collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + + for k, v := range s.unusedCollections { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + f.WriteString("|===\n\n") + } + + _, _ = f.WriteString("\n.Summary of Collections\n") + _, _ = f.WriteString(`[cols="<3,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Type | Count\n") + for k, v := range s.counts { + _, _ = f.WriteString("| " + CollectionDescriptors[k].SybolicName + " | " + strconv.Itoa(v) + "\n") + } + _, _ = f.WriteString("| Total | " + strconv.Itoa(len(s.jStats)) + "\n") + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by MaxSize\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1"]` + "\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets\n") + for _, c := range s.topNByMax { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") + + _, _ = f.WriteString("\n.Summary of Top " + strconv.Itoa(s.topN) + " Collections by Access\n") + _, _ = f.WriteString(`[cols="<1,<3,>1,>1,>1,>1,>1"]` + 
"\n\n") + _, _ = f.WriteString("|===\n") + _, _ = f.WriteString("| Source | Description | MaxSize | EndSize | Puts | Gets | P+G\n") + for _, c := range s.topNByUsed { + _, _ = f.WriteString("| " + CollectionDescriptors[c.Source].SybolicName + "\n") + _, _ = f.WriteString("| " + c.Description + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.MaxSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.CurSize) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Puts) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets) + "\n") + _, _ = f.WriteString("| " + strconv.Itoa(c.Gets+c.Puts) + "\n") + _, _ = f.WriteString("\n") + } + _, _ = f.WriteString("|===\n\n") +} + +// AddJStatRec adds a [JStatRec] record to the [goRunStats] collection when build runtimeConfig antlr.stats is enabled. +func (s *goRunStats) AddJStatRec(rec *JStatRec) { + s.jStatsLock.Lock() + defer s.jStatsLock.Unlock() + s.jStats = append(s.jStats, rec) +} + +// CollectionAnomalies looks through all the statistical records and gathers any anomalies that have been found. +func (s *goRunStats) CollectionAnomalies() { + s.jStatsLock.RLock() + defer s.jStatsLock.RUnlock() + s.counts = make(map[CollectionSource]int, len(s.jStats)) + for _, c := range s.jStats { + + // Accumlate raw counts + // + s.counts[c.Source]++ + + // Look for allocated but unused collections and count them + if c.MaxSize == 0 && c.Puts == 0 { + if s.unusedCollections == nil { + s.unusedCollections = make(map[CollectionSource]int) + } + s.unusedCollections[c.Source]++ + } + if c.MaxSize > 6000 { + fmt.Println("Collection ", c.Description, "accumulated a max size of ", c.MaxSize, " - this is probably too large and indicates a poorly formed grammar") + } + } + +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/stats_data.go b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go new file mode 100644 index 00000000..4d9eb94e --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/stats_data.go @@ -0,0 +1,23 @@ +package antlr + +// A JStatRec is a record of a particular use of a [JStore], [JMap] or JPCMap] collection. Typically, it will be +// used to look for unused collections that wre allocated anyway, problems with hash bucket clashes, and anomalies +// such as huge numbers of Gets with no entries found GetNoEnt. You can refer to the CollectionAnomalies() function +// for ideas on what can be gleaned from these statistics about collections. +type JStatRec struct { + Source CollectionSource + MaxSize int + CurSize int + Gets int + GetHits int + GetMisses int + GetHashConflicts int + GetNoEnt int + Puts int + PutHits int + PutMisses int + PutHashConflicts int + MaxSlotSize int + Description string + CreateStack []byte +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/token.go b/vendor/github.com/antlr4-go/antlr/v4/token.go new file mode 100644 index 00000000..f5bc3422 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/token.go @@ -0,0 +1,213 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type TokenSourceCharStreamPair struct { + tokenSource TokenSource + charStream CharStream +} + +// A token has properties: text, type, line, character position in the line +// (so we can ignore tabs), token channel, index, and source from which +// we obtained this token. 
+ +type Token interface { + GetSource() *TokenSourceCharStreamPair + GetTokenType() int + GetChannel() int + GetStart() int + GetStop() int + GetLine() int + GetColumn() int + + GetText() string + SetText(s string) + + GetTokenIndex() int + SetTokenIndex(v int) + + GetTokenSource() TokenSource + GetInputStream() CharStream + + String() string +} + +type BaseToken struct { + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + text string // text of the token. + readOnly bool +} + +const ( + TokenInvalidType = 0 + + // TokenEpsilon - during lookahead operations, this "token" signifies we hit the rule end [ATN] state + // and did not follow it despite needing to. + TokenEpsilon = -2 + + TokenMinUserTokenType = 1 + + TokenEOF = -1 + + // TokenDefaultChannel is the default channel upon which tokens are sent to the parser. + // + // All tokens go to the parser (unless [Skip] is called in the lexer rule) + // on a particular "channel". The parser tunes to a particular channel + // so that whitespace etc... can go to the parser on a "hidden" channel. + TokenDefaultChannel = 0 + + // TokenHiddenChannel defines the normal hidden channel - the parser wil not see tokens that are not on [TokenDefaultChannel]. + // + // Anything on a different channel than TokenDefaultChannel is not parsed by parser. + TokenHiddenChannel = 1 +) + +func (b *BaseToken) GetChannel() int { + return b.channel +} + +func (b *BaseToken) GetStart() int { + return b.start +} + +func (b *BaseToken) GetStop() int { + return b.stop +} + +func (b *BaseToken) GetLine() int { + return b.line +} + +func (b *BaseToken) GetColumn() int { + return b.column +} + +func (b *BaseToken) GetTokenType() int { + return b.tokenType +} + +func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { + return b.source +} + +func (b *BaseToken) GetText() string { + if b.text != "" { + return b.text + } + input := b.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if b.GetStart() < n && b.GetStop() < n { + return input.GetTextFromInterval(NewInterval(b.GetStart(), b.GetStop())) + } + return "" +} + +func (b *BaseToken) SetText(text string) { + b.text = text +} + +func (b *BaseToken) GetTokenIndex() int { + return b.tokenIndex +} + +func (b *BaseToken) SetTokenIndex(v int) { + b.tokenIndex = v +} + +func (b *BaseToken) GetTokenSource() TokenSource { + return b.source.tokenSource +} + +func (b *BaseToken) GetInputStream() CharStream { + return b.source.charStream +} + +func (b *BaseToken) String() string { + txt := b.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if b.GetChannel() > 0 { + ch = ",channel=" + strconv.Itoa(b.GetChannel()) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(b.GetTokenIndex()) + "," + strconv.Itoa(b.GetStart()) + ":" + strconv.Itoa(b.GetStop()) + "='" + + txt + "',<" + strconv.Itoa(b.GetTokenType()) + ">" + + ch + "," + strconv.Itoa(b.GetLine()) + ":" + strconv.Itoa(b.GetColumn()) + "]" +} + +type CommonToken struct { + BaseToken +} + +func 
NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+	t := &CommonToken{
+		BaseToken: BaseToken{
+			source:     source,
+			tokenType:  tokenType,
+			channel:    channel,
+			start:      start,
+			stop:       stop,
+			tokenIndex: -1,
+		},
+	}
+
+	if t.source.tokenSource != nil {
+		t.line = source.tokenSource.GetLine()
+		t.column = source.tokenSource.GetCharPositionInLine()
+	} else {
+		t.column = -1
+	}
+	return t
+}
+
+// An empty source pair, used as the default value of source for tokens
+// that do not have a source.
+
+//CommonToken.EMPTY_SOURCE = [ nil, nil ]
+
+// Constructs a new [CommonToken] as a copy of another [Token].
+//
+// If {@code oldToken} is also a {@link CommonToken} instance, the newly
+// constructed token will share a reference to the {@link //text} field and
+// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
+// be assigned the result of calling {@link //GetText}, and {@link //source}
+// will be constructed from the result of {@link Token//GetTokenSource} and
+// {@link Token//GetInputStream}.
+// +// @param oldToken The token to copy. +func (c *CommonToken) clone() *CommonToken { + t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) + t.tokenIndex = c.GetTokenIndex() + t.line = c.GetLine() + t.column = c.GetColumn() + t.text = c.GetText() + return t +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_source.go b/vendor/github.com/antlr4-go/antlr/v4/token_source.go new file mode 100644 index 00000000..a3f36eaa --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/token_source.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenSource interface { + NextToken() Token + Skip() + More() + GetLine() int + GetCharPositionInLine() int + GetInputStream() CharStream + GetSourceName() string + setTokenFactory(factory TokenFactory) + GetTokenFactory() TokenFactory +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/token_stream.go b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go new file mode 100644 index 00000000..bf4ff663 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/token_stream.go @@ -0,0 +1,21 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenStream interface { + IntStream + + LT(k int) Token + Reset() + + Get(index int) Token + GetTokenSource() TokenSource + SetTokenSource(TokenSource) + + GetAllText() string + GetTextFromInterval(Interval) string + GetTextFromRuleContext(RuleContext) string + GetTextFromTokens(Token, Token) string +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go new file mode 100644 index 00000000..ccf59b46 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/tokenstream_rewriter.go @@ -0,0 +1,662 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "fmt" +) + +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. + +//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example:
+//
+//	CharStream input = new ANTLRFileStream("input");
+//	TLexer lex = new TLexer(input);
+//	CommonTokenStream tokens = new CommonTokenStream(lex);
+//	T parser = new T(tokens);
+//	TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+//	parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+//	Token t, u;
+//	...
+//	rewriter.insertAfter(t, "text to put after t");
+//	rewriter.insertAfter(u, "text after u");
+//	System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+//
+//	rewriter.insertAfter("pass1", t, "text to put after t");
+//	rewriter.insertAfter("pass2", u, "text after u");
+//	System.out.println(rewriter.getText("pass1"));
+//	System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
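+//
+// A rough Go equivalent of the Java snippets above, pieced together from the
+// exported API defined in this file. The TLexer/TParser names stand in for
+// ANTLR-generated lexer and parser types and are placeholders, not part of
+// this package:
+//
+//	input := antlr.NewInputStream("some input text")
+//	lex := NewTLexer(input) // hypothetical generated lexer
+//	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
+//	parser := NewTParser(tokens) // hypothetical generated parser
+//	rewriter := antlr.NewTokenStreamRewriter(tokens)
+//	parser.StartRule()
+//
+//	// later, with tokens t and u in hand:
+//	rewriter.InsertAfterToken(antlr.DefaultProgramName, t, "text to put after t")
+//	rewriter.InsertAfterToken(antlr.DefaultProgramName, u, "text after u")
+//	fmt.Println(rewriter.GetTextDefault())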
+ +const ( + DefaultProgramName = "default" + ProgramInitSize = 100 + MinTokenIndex = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instructionIndex int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + opName string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation) GetInstructionIndex() int { + return op.instructionIndex +} + +func (op *BaseRewriteOperation) GetIndex() int { + return op.index +} + +func (op *BaseRewriteOperation) GetText() string { + return op.text +} + +func (op *BaseRewriteOperation) GetOpName() string { + return op.opName +} + +func (op *BaseRewriteOperation) GetTokens() TokenStream { + return op.tokens +} + +func (op *BaseRewriteOperation) SetInstructionIndex(val int) { + op.instructionIndex = val +} + +func (op *BaseRewriteOperation) SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation) SetText(val string) { + op.text = val +} + +func (op *BaseRewriteOperation) SetOpName(val string) { + op.opName = val +} + +func (op *BaseRewriteOperation) SetTokens(val TokenStream) { + op.tokens = val +} + +func (op *BaseRewriteOperation) Execute(_ *bytes.Buffer) int { + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.opName, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp { + return &InsertBeforeOp{BaseRewriteOperation: BaseRewriteOperation{ + index: index, + text: text, + opName: "InsertBeforeOp", + tokens: stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// InsertAfterOp distinguishes between insert after/before to do the "insert after" instructions +// first and then the "insert before" instructions at same index. Implementation +// of "insert after" is "insert before index+1". +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp { + return &InsertAfterOp{ + BaseRewriteOperation: BaseRewriteOperation{ + index: index + 1, + text: text, + tokens: stream, + }, + } +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF { + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index + 1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// ReplaceOp tries to replace range from x..y with (y-x)+1 ReplaceOp +// instructions. 
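+//
+// For example (a sketch of the semantics, not new behavior): replacing tokens
+// 2..4 with "x" is recorded as a single ReplaceOp with index 2 and LastIndex 4;
+// its Execute writes "x" to the buffer and returns 5, so rendering resumes
+// after the replaced range.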
+type ReplaceOp struct {
+	BaseRewriteOperation
+	LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream) *ReplaceOp {
+	return &ReplaceOp{
+		BaseRewriteOperation: BaseRewriteOperation{
+			index:  from,
+			text:   text,
+			opName: "ReplaceOp",
+			tokens: stream,
+		},
+		LastIndex: to,
+	}
+}
+
+func (op *ReplaceOp) Execute(buffer *bytes.Buffer) int {
+	if op.text != "" {
+		buffer.WriteString(op.text)
+	}
+	return op.LastIndex + 1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%s\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+type TokenStreamRewriter struct {
+	// Our source stream
+	tokens TokenStream
+	// You may have multiple, named streams of rewrite operations.
+	// I'm calling these things "programs."
+	// Maps String (name) → rewrite (List)
+	programs                map[string][]RewriteOperation
+	lastRewriteTokenIndexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter {
+	return &TokenStreamRewriter{
+		tokens: tokens,
+		programs: map[string][]RewriteOperation{
+			DefaultProgramName: make([]RewriteOperation, 0, ProgramInitSize),
+		},
+		lastRewriteTokenIndexes: map[string]int{},
+	}
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream {
+	return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(programName string, instructionIndex int) {
+	is, ok := tsr.programs[programName]
+	if ok {
+		tsr.programs[programName] = is[MinTokenIndex:instructionIndex]
+	}
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instructionIndex int) {
+	tsr.Rollback(DefaultProgramName, instructionIndex)
+}
+
+// DeleteProgram resets the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(programName string) {
+	tsr.Rollback(programName, MinTokenIndex) //TODO: double test this, because the lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault() {
+	tsr.DeleteProgram(DefaultProgramName)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(programName string, index int, text string) {
+	// to insert after, just insert before next index (even if past end)
+	var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(programName)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string) {
+	tsr.InsertAfter(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(programName string, token Token, text string) {
+	tsr.InsertAfter(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBefore(programName string, index int, text string) {
+	var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+	rewrites := tsr.GetProgram(programName)
+	op.SetInstructionIndex(len(rewrites))
+	tsr.AddToProgram(programName, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string) {
+	tsr.InsertBefore(DefaultProgramName, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(programName string, token Token, text string) {
+	tsr.InsertBefore(programName, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(programName string, from, to int, text
string) { + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size() { + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(programName) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(programName, op) +} + +func (tsr *TokenStreamRewriter) ReplaceDefault(from, to int, text string) { + tsr.Replace(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceDefaultPos(index int, text string) { + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) ReplaceToken(programName string, from, to Token, text string) { + tsr.Replace(programName, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefault(from, to Token, text string) { + tsr.ReplaceToken(DefaultProgramName, from, to, text) +} + +func (tsr *TokenStreamRewriter) ReplaceTokenDefaultPos(index Token, text string) { + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter) Delete(programName string, from, to int) { + tsr.Replace(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteDefault(from, to int) { + tsr.Delete(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) DeleteDefaultPos(index int) { + tsr.DeleteDefault(index, index) +} + +func (tsr *TokenStreamRewriter) DeleteToken(programName string, from, to Token) { + tsr.ReplaceToken(programName, from, to, "") +} + +func (tsr *TokenStreamRewriter) DeleteTokenDefault(from, to Token) { + tsr.DeleteToken(DefaultProgramName, from, to) +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndex(programName string) int { + i, ok := tsr.lastRewriteTokenIndexes[programName] + if !ok { + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter) GetLastRewriteTokenIndexDefault() int { + return tsr.GetLastRewriteTokenIndex(DefaultProgramName) +} + +func (tsr *TokenStreamRewriter) SetLastRewriteTokenIndex(programName string, i int) { + tsr.lastRewriteTokenIndexes[programName] = i +} + +func (tsr *TokenStreamRewriter) InitializeProgram(name string) []RewriteOperation { + is := make([]RewriteOperation, 0, ProgramInitSize) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter) AddToProgram(name string, op RewriteOperation) { + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter) GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok { + is = tsr.InitializeProgram(name) + } + return is +} + +// GetTextDefault returns the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter) GetTextDefault() string { + return tsr.GetText( + DefaultProgramName, + NewInterval(0, tsr.tokens.Size()-1)) +} + +// GetText returns the text from the original tokens altered per the +// instructions given to this rewriter. 
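+//
+// A small usage sketch; the token indexes are illustrative, and the interval
+// mirrors the one built by GetTextDefault above:
+//
+//	tsr.InsertBeforeDefault(0, "// header\n")
+//	tsr.ReplaceDefaultPos(3, "newName")
+//	out := tsr.GetText(DefaultProgramName, NewInterval(0, tsr.GetTokenStream().Size()-1))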
+func (tsr *TokenStreamRewriter) GetText(programName string, interval Interval) string {
+	rewrites := tsr.programs[programName]
+	start := interval.Start
+	stop := interval.Stop
+	// ensure start/end are in range
+	stop = min(stop, tsr.tokens.Size()-1)
+	start = max(start, 0)
+	if len(rewrites) == 0 {
+		return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+	}
+	buf := bytes.Buffer{}
+	// First, optimize instruction stream
+	indexToOp := reduceToSingleOperationPerIndex(rewrites)
+	// Walk buffer, executing instructions and emitting tokens
+	for i := start; i <= stop && i < tsr.tokens.Size(); {
+		op := indexToOp[i]
+		delete(indexToOp, i) // remove so any left have index size-1
+		t := tsr.tokens.Get(i)
+		if op == nil {
+			// no operation at that index, just dump token
+			if t.GetTokenType() != TokenEOF {
+				buf.WriteString(t.GetText())
+			}
+			i++ // move to next token
+		} else {
+			i = op.Execute(&buf) // execute operation and skip
+		}
+	}
+	// include stuff after end if it's last index in buffer
+	// So, if they did an insertAfter(lastValidIndex, "foo"), include
+	// foo if end==lastValidIndex.
+	if stop == tsr.tokens.Size()-1 {
+		// Scan any remaining operations after the last token; they
+		// should be included (they will be inserts).
+		for _, op := range indexToOp {
+			if op.GetIndex() >= tsr.tokens.Size()-1 {
+				buf.WriteString(op.GetText())
+			}
+		}
+	}
+	return buf.String()
+}
+
+// reduceToSingleOperationPerIndex combines operations and reports invalid operations (like
+// overlapping replaces that are not completely nested). Inserts to the
+// same index need to be combined etc...
+//
+// Here are the cases:
+//
+//	I.i.u I.j.v                             leave alone, non-overlapping
+//	I.i.u I.i.v                             combine: Iivu
+//
+//	R.i-j.u R.x-y.v | i-j in x-y            delete first R
+//	R.i-j.u R.i-j.v                         delete first R
+//	R.i-j.u R.x-y.v | x-y in i-j            ERROR
+//	R.i-j.u R.x-y.v | boundaries overlap    ERROR
+//
+//	Delete special case of replace (text==null):
+//	D.i-j.u D.x-y.v | boundaries overlap    combine to max(min)..max(right)
+//
+//	I.i.u R.x-y.v   | i in (x+1)-y          delete I (since insert before
+//	                                        we're not deleting i)
+//	I.i.u R.x-y.v   | i not in (x+1)-y      leave alone, non-overlapping
+//	R.x-y.v I.i.u   | i in x-y              ERROR
+//	R.x-y.v I.x.u                           R.x-y.uv (combine, delete I)
+//	R.x-y.v I.i.u   | i not in x-y          leave alone, non-overlapping
+//
+//	I.i.u = insert u before op @ index i
+//	R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+//	1. wipe out any insertions before op within that range.
+//	2. Drop any replace op before that is contained completely within
+//	   that range.
+//	3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+//	1. for any inserts to same index, combine even if not adjacent.
+//	2. for any prior replace with same left boundary, combine this
+//	   insert with replace and delete this 'replace'.
+//	3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the 'replace' range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// The func returns a map from token index to operation.
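+//
+// A worked instance of the combining described above (a sketch, not extra
+// behavior): given InsertBefore(2, "a") followed by Replace(2, 4, "b"), the
+// insert is folded into the replace, leaving a single ReplaceOp at index 2
+// with text "ab" in the returned map.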
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation { + // WALK REPLACES + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + rop, ok := op.(*ReplaceOp) + if !ok { + continue + } + // Wipe prior inserts within range + for j := 0; j < i && j < len(rewrites); j++ { + if iop, ok := rewrites[j].(*InsertBeforeOp); ok { + if iop.index == rop.index { + // E.g., insert before 2, delete 2..2; update replace + // text to include insert before, kill insert + rewrites[iop.instructionIndex] = nil + if rop.text != "" { + rop.text = iop.text + rop.text + } else { + rop.text = iop.text + } + } else if iop.index > rop.index && iop.index <= rop.LastIndex { + // delete insert as it's a no-op. + rewrites[iop.instructionIndex] = nil + } + } + } + // Drop any prior replaces contained within + for j := 0; j < i && j < len(rewrites); j++ { + if prevop, ok := rewrites[j].(*ReplaceOp); ok { + if prevop.index >= rop.index && prevop.LastIndex <= rop.LastIndex { + // delete replace as it's a no-op. + rewrites[prevop.instructionIndex] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint { + rewrites[prevop.instructionIndex] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + } else if !disjoint { + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok { + continue + } + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j := 0; j < i && j < len(rewrites); j++ { + if nextIop, ok := rewrites[j].(*InsertAfterOp); ok { + if nextIop.index == iop.GetIndex() { + iop.SetText(nextIop.text + iop.GetText()) + rewrites[j] = nil + } + } + if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok { + if prevIop.index == iop.GetIndex() { + iop.SetText(iop.GetText() + prevIop.text) + rewrites[prevIop.instructionIndex] = nil + } + } + } + // look for replaces where iop.index is in range; error + for j := 0; j < i && j < len(rewrites); j++ { + if rop, ok := rewrites[j].(*ReplaceOp); ok { + if iop.GetIndex() == rop.index { + rop.text = iop.GetText() + rop.text + rewrites[i] = nil + continue + } + if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex { + panic("insert op " + iop.String() + " within boundaries of previous " + rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i := 0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil { + continue + } + if _, ok := m[op.GetIndex()]; ok { + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + +/* + Quick fixing Go lack of overloads +*/ + +func max(a, b int) int { + if a > b { + return a + } else { + return b + } +} +func min(a, b int) int { + if a < b { + return a + } else { + return b + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go 
b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go new file mode 100644 index 00000000..7b663bf8 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/trace_listener.go @@ -0,0 +1,32 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +type TraceListener struct { + parser *BaseParser +} + +func NewTraceListener(parser *BaseParser) *TraceListener { + tl := new(TraceListener) + tl.parser = parser + return tl +} + +func (t *TraceListener) VisitErrorNode(_ ErrorNode) { +} + +func (t *TraceListener) EnterEveryRule(ctx ParserRuleContext) { + fmt.Println("enter " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} + +func (t *TraceListener) VisitTerminal(node TerminalNode) { + fmt.Println("consume " + fmt.Sprint(node.GetSymbol()) + " rule " + t.parser.GetRuleNames()[t.parser.ctx.GetRuleIndex()]) +} + +func (t *TraceListener) ExitEveryRule(ctx ParserRuleContext) { + fmt.Println("exit " + t.parser.GetRuleNames()[ctx.GetRuleIndex()] + ", LT(1)=" + t.parser.input.LT(1).GetText()) +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/transition.go b/vendor/github.com/antlr4-go/antlr/v4/transition.go new file mode 100644 index 00000000..313b0fc1 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/transition.go @@ -0,0 +1,439 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +// atom, set, epsilon, action, predicate, rule transitions. +// +//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific classes. The DFA transitions, on
+// the other hand, need to update the labels as they add transitions to
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
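+//
+// For example, using two of the concrete transition types defined below (a
+// sketch of the matching semantics; target is any ATNState):
+//
+//	r := NewRangeTransition(target, 'a', 'z')
+//	r.Matches('m', 0, 0xFFFF) // true: 'm' falls inside 'a'..'z'
+//	e := NewEpsilonTransition(target, -1)
+//	e.Matches('m', 0, 0xFFFF) // false: epsilon transitions never match a symbol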
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(_, _, _ int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +//goland:noinspection GoUnusedGlobalVariable +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// AtomTransition +// TODO: make all transitions sets? 
no, should remove set edges +type AtomTransition struct { + BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + t := &AtomTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionATOM, + label: intervalSet, + isEpsilon: false, + }, + } + t.intervalSet = t.makeLabel() + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, _, _ int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + BaseTransition + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + return &RuleTransition{ + BaseTransition: BaseTransition{ + target: ruleStart, + isEpsilon: true, + serializationType: TransitionRULE, + }, + ruleIndex: ruleIndex, + precedence: precedence, + followState: followState, + } +} + +func (t *RuleTransition) Matches(_, _, _ int) bool { + return false +} + +type EpsilonTransition struct { + BaseTransition + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + return &EpsilonTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionEPSILON, + isEpsilon: true, + }, + outermostPrecedenceReturn: outermostPrecedenceReturn, + } +} + +func (t *EpsilonTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + BaseTransition + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + t := &RangeTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionRANGE, + isEpsilon: false, + }, + start: start, + stop: stop, + } + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, _, _ int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + var sb strings.Builder + sb.WriteByte('\'') + sb.WriteRune(rune(t.start)) + sb.WriteString("'..'") + sb.WriteRune(rune(t.stop)) + sb.WriteByte('\'') + return sb.String() +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + return &BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + }, + } +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + BaseAbstractPredicateTransition + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + return &PredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPREDICATE, + isEpsilon: true, + }, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + predIndex: predIndex, + } +} + +func (t 
*PredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string { + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + BaseTransition + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + return &ActionTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionACTION, + isEpsilon: true, + }, + isCtxDependent: isCtxDependent, + ruleIndex: ruleIndex, + actionIndex: actionIndex, + } +} + +func (t *ActionTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + t := &SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionSET, + }, + } + + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + return t +} + +func (t *SetTransition) Matches(symbol, _, _ int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + t := &NotSetTransition{ + SetTransition: SetTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionNOTSET, + }, + }, + } + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + return &WildcardTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionWILDCARD, + }, + } +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." 
+} + +type PrecedencePredicateTransition struct { + BaseAbstractPredicateTransition + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + return &PrecedencePredicateTransition{ + BaseAbstractPredicateTransition: BaseAbstractPredicateTransition{ + BaseTransition: BaseTransition{ + target: target, + serializationType: TransitionPRECEDENCE, + isEpsilon: true, + }, + }, + precedence: precedence, + } +} + +func (t *PrecedencePredicateTransition) Matches(_, _, _ int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/tree.go b/vendor/github.com/antlr4-go/antlr/v4/tree.go new file mode 100644 index 00000000..c288420f --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/tree.go @@ -0,0 +1,304 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + GetSourceInterval() Interval +} + +type ParseTree interface { + SyntaxTree + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + GetRuleContext() RuleContext +} + +type TerminalNode interface { + ParseTree + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) } +func (v *BaseParseTreeVisitor) VisitChildren(_ RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(_ TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(_ ErrorNode) interface{} { return nil } + +// TODO: Implement this? 
+//func (this ParseTreeVisitor) Visit(ctx) {
+//	if (Utils.isArray(ctx)) {
+//		self := this
+//		return ctx.map(function(child) { return VisitAtom(self, child)})
+//	} else {
+//		return VisitAtom(this, ctx)
+//	}
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+//	if (ctx.parser == nil) { //is terminal
+//		return
+//	}
+//
+//	name := ctx.parser.ruleNames[ctx.ruleIndex]
+//	funcName := "Visit" + Utils.titleCase(name)
+//
+//	return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+	VisitTerminal(node TerminalNode)
+	VisitErrorNode(node ErrorNode)
+	EnterEveryRule(ctx ParserRuleContext)
+	ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(_ TerminalNode)       {}
+func (l *BaseParseTreeListener) VisitErrorNode(_ ErrorNode)         {}
+func (l *BaseParseTreeListener) EnterEveryRule(_ ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(_ ParserRuleContext)  {}
+
+type TerminalNodeImpl struct {
+	parentCtx RuleContext
+	symbol    Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+	tn := new(TerminalNodeImpl)
+
+	tn.parentCtx = nil
+	tn.symbol = symbol
+
+	return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(_ int) Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+	return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(_ []Tree) {
+	panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+	return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+	t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+	return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() Interval {
+	if t.symbol == nil {
+		return TreeInvalidInterval
+	}
+	tokenIndex := t.symbol.GetTokenIndex()
+	return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+	return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+	if t.symbol.GetTokenType() == TokenEOF {
+		return "<EOF>"
+	}
+
+	return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(_ []string, _ Recognizer) string {
+	return t.String()
+}
+
+// Represents a token that was consumed during resynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+	*TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+	en := new(ErrorNodeImpl)
+	en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+	return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+	return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+	return new(ParseTreeWalker)
+}
+
+// Walk performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search.
On each node, [EnterRule] is called before +// recursively walking down into child nodes, then [ExitRule] is called after the recursive call to wind up. +func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + switch tt := t.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + p.EnterRule(listener, t.(RuleNode)) + for i := 0; i < t.GetChildCount(); i++ { + child := t.GetChild(i) + p.Walk(listener, child) + } + p.ExitRule(listener, t.(RuleNode)) + } +} + +// EnterRule enters a grammar rule by first triggering the generic event [ParseTreeListener].[EnterEveryRule] +// then by triggering the event specific to the given parse tree node +func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) +} + +// ExitRule exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event [ParseTreeListener].ExitEveryRule +func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) +} + +//goland:noinspection GoUnusedGlobalVariable +var ParseTreeWalkerDefault = NewParseTreeWalker() + +type IterativeParseTreeWalker struct { + *ParseTreeWalker +} + +//goland:noinspection GoUnusedExportedFunction +func NewIterativeParseTreeWalker() *IterativeParseTreeWalker { + return new(IterativeParseTreeWalker) +} + +func (i *IterativeParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + var stack []Tree + var indexStack []int + currentNode := t + currentIndex := 0 + + for currentNode != nil { + // pre-order visit + switch tt := currentNode.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + i.EnterRule(listener, currentNode.(RuleNode)) + } + // Move down to first child, if exists + if currentNode.GetChildCount() > 0 { + stack = append(stack, currentNode) + indexStack = append(indexStack, currentIndex) + currentIndex = 0 + currentNode = currentNode.GetChild(0) + continue + } + + for { + // post-order visit + if ruleNode, ok := currentNode.(RuleNode); ok { + i.ExitRule(listener, ruleNode) + } + // No parent, so no siblings + if len(stack) == 0 { + currentNode = nil + currentIndex = 0 + break + } + // Move to next sibling if possible + currentIndex++ + if stack[len(stack)-1].GetChildCount() > currentIndex { + currentNode = stack[len(stack)-1].GetChild(currentIndex) + break + } + // No next, sibling, so move up + currentNode, stack = stack[len(stack)-1], stack[:len(stack)-1] + currentIndex, indexStack = indexStack[len(indexStack)-1], indexStack[:len(indexStack)-1] + } + } +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/trees.go b/vendor/github.com/antlr4-go/antlr/v4/trees.go new file mode 100644 index 00000000..f44c05d8 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/trees.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +/** A set of utility routines useful for all kinds of ANTLR trees. */ + +// TreesStringTree prints out a whole tree in LISP form. [getNodeText] is used on the +// node payloads to get the text for the nodes. 
Detects parse trees and extracts data appropriately. +func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { + + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + s := TreesGetNodeText(tree, ruleNames, nil) + + s = EscapeWhitespace(s, false) + c := tree.GetChildCount() + if c == 0 { + return s + } + res := "(" + s + " " + if c > 0 { + s = TreesStringTree(tree.GetChild(0), ruleNames, nil) + res += s + } + for i := 1; i < c; i++ { + s = TreesStringTree(tree.GetChild(i), ruleNames, nil) + res += " " + s + } + res += ")" + return res +} + +func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + if ruleNames != nil { + switch t2 := t.(type) { + case RuleNode: + t3 := t2.GetRuleContext() + altNumber := t3.GetAltNumber() + + if altNumber != ATNInvalidAltNumber { + return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) + } + return ruleNames[t3.GetRuleIndex()] + case ErrorNode: + return fmt.Sprint(t2) + case TerminalNode: + if t2.GetSymbol() != nil { + return t2.GetSymbol().GetText() + } + } + } + + // no recognition for rule names + payload := t.GetPayload() + if p2, ok := payload.(Token); ok { + return p2.GetText() + } + + return fmt.Sprint(t.GetPayload()) +} + +// TreesGetChildren returns am ordered list of all children of this node +// +//goland:noinspection GoUnusedExportedFunction +func TreesGetChildren(t Tree) []Tree { + list := make([]Tree, 0) + for i := 0; i < t.GetChildCount(); i++ { + list = append(list, t.GetChild(i)) + } + return list +} + +// TreesgetAncestors returns a list of all ancestors of this node. The first node of list is the root +// and the last node is the parent of this node. +// +//goland:noinspection GoUnusedExportedFunction +func TreesgetAncestors(t Tree) []Tree { + ancestors := make([]Tree, 0) + t = t.GetParent() + for t != nil { + f := []Tree{t} + ancestors = append(f, ancestors...) + t = t.GetParent() + } + return ancestors +} + +//goland:noinspection GoUnusedExportedFunction +func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { + return TreesfindAllNodes(t, ttype, true) +} + +//goland:noinspection GoUnusedExportedFunction +func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { + return TreesfindAllNodes(t, ruleIndex, false) +} + +func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { + nodes := make([]ParseTree, 0) + treesFindAllNodes(t, index, findTokens, &nodes) + return nodes +} + +func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { + // check this node (the root) first + + t2, ok := t.(TerminalNode) + t3, ok2 := t.(ParserRuleContext) + + if findTokens && ok { + if t2.GetSymbol().GetTokenType() == index { + *nodes = append(*nodes, t2) + } + } else if !findTokens && ok2 { + if t3.GetRuleIndex() == index { + *nodes = append(*nodes, t3) + } + } + // check children + for i := 0; i < t.GetChildCount(); i++ { + treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) + } +} + +//goland:noinspection GoUnusedExportedFunction +func TreesDescendants(t ParseTree) []ParseTree { + nodes := []ParseTree{t} + for i := 0; i < t.GetChildCount(); i++ { + nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) 
+ } + return nodes +} diff --git a/vendor/github.com/antlr4-go/antlr/v4/utils.go b/vendor/github.com/antlr4-go/antlr/v4/utils.go new file mode 100644 index 00000000..36a37f24 --- /dev/null +++ b/vendor/github.com/antlr4-go/antlr/v4/utils.go @@ -0,0 +1,381 @@ +// Copyright (c) 2012-2022 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "errors" + "fmt" + "math/bits" + "os" + "strconv" + "strings" + "syscall" +) + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// A simple integer stack + +type IntStack []int + +var ErrEmptyStack = errors.New("stack is empty") + +func (s *IntStack) Pop() (int, error) { + l := len(*s) - 1 + if l < 0 { + return 0, ErrEmptyStack + } + v := (*s)[l] + *s = (*s)[0:l] + return v, nil +} + +func (s *IntStack) Push(e int) { + *s = append(*s, e) +} + +const bitsPerWord = 64 + +func indexForBit(bit int) int { + return bit / bitsPerWord +} + +//goland:noinspection GoUnusedExportedFunction,GoUnusedFunction +func wordForBit(data []uint64, bit int) uint64 { + idx := indexForBit(bit) + if idx >= len(data) { + return 0 + } + return data[idx] +} + +func maskForBit(bit int) uint64 { + return uint64(1) << (bit % bitsPerWord) +} + +func wordsNeeded(bit int) int { + return indexForBit(bit) + 1 +} + +type BitSet struct { + data []uint64 +} + +// NewBitSet creates a new bitwise set +// TODO: See if we can replace with the standard library's BitSet +func NewBitSet() *BitSet { + return &BitSet{} +} + +func (b *BitSet) add(value int) { + idx := indexForBit(value) + if idx >= len(b.data) { + size := wordsNeeded(value) + data := make([]uint64, size) + copy(data, b.data) + b.data = data + } + b.data[idx] |= maskForBit(value) +} + +func (b *BitSet) clear(index int) { + idx := indexForBit(index) + if idx >= len(b.data) { + return + } + b.data[idx] &= ^maskForBit(index) +} + +func (b *BitSet) or(set *BitSet) { + // Get min size necessary to represent the bits in both sets. + bLen := b.minLen() + setLen := set.minLen() + maxLen := intMax(bLen, setLen) + if maxLen > len(b.data) { + // Increase the size of len(b.data) to represent the bits in both sets. + data := make([]uint64, maxLen) + copy(data, b.data) + b.data = data + } + // len(b.data) is at least setLen. + for i := 0; i < setLen; i++ { + b.data[i] |= set.data[i] + } +} + +func (b *BitSet) remove(value int) { + b.clear(value) +} + +func (b *BitSet) contains(value int) bool { + idx := indexForBit(value) + if idx >= len(b.data) { + return false + } + return (b.data[idx] & maskForBit(value)) != 0 +} + +func (b *BitSet) minValue() int { + for i, v := range b.data { + if v == 0 { + continue + } + return i*bitsPerWord + bits.TrailingZeros64(v) + } + return 2147483647 +} + +func (b *BitSet) equals(other interface{}) bool { + otherBitSet, ok := other.(*BitSet) + if !ok { + return false + } + + if b == otherBitSet { + return true + } + + // We only compare set bits, so we cannot rely on the two slices having the same size. Its + // possible for two BitSets to have different slice lengths but the same set bits. So we only + // compare the relevant words and ignore the trailing zeros. 
+ bLen := b.minLen() + otherLen := otherBitSet.minLen() + + if bLen != otherLen { + return false + } + + for i := 0; i < bLen; i++ { + if b.data[i] != otherBitSet.data[i] { + return false + } + } + + return true +} + +func (b *BitSet) minLen() int { + for i := len(b.data); i > 0; i-- { + if b.data[i-1] != 0 { + return i + } + } + return 0 +} + +func (b *BitSet) length() int { + cnt := 0 + for _, val := range b.data { + cnt += bits.OnesCount64(val) + } + return cnt +} + +func (b *BitSet) String() string { + vals := make([]string, 0, b.length()) + + for i, v := range b.data { + for v != 0 { + n := bits.TrailingZeros64(v) + vals = append(vals, strconv.Itoa(i*bitsPerWord+n)) + v &= ^(uint64(1) << n) + } + } + + return "{" + strings.Join(vals, ", ") + "}" +} + +type AltDict struct { + data map[string]interface{} +} + +func NewAltDict() *AltDict { + d := new(AltDict) + d.data = make(map[string]interface{}) + return d +} + +func (a *AltDict) Get(key string) interface{} { + key = "k-" + key + return a.data[key] +} + +func (a *AltDict) put(key string, value interface{}) { + key = "k-" + key + a.data[key] = value +} + +func (a *AltDict) values() []interface{} { + vs := make([]interface{}, len(a.data)) + i := 0 + for _, v := range a.data { + vs[i] = v + i++ + } + return vs +} + +func EscapeWhitespace(s string, escapeSpaces bool) string { + + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = strings.Replace(s, " ", "\u00B7", -1) + } + return s +} + +//goland:noinspection GoUnusedExportedFunction +func TerminalNodeToStringArray(sa []TerminalNode) []string { + st := make([]string, len(sa)) + + for i, s := range sa { + st[i] = fmt.Sprintf("%v", s) + } + + return st +} + +//goland:noinspection GoUnusedExportedFunction +func PrintArrayJavaStyle(sa []string) string { + var buffer bytes.Buffer + + buffer.WriteString("[") + + for i, s := range sa { + buffer.WriteString(s) + if i != len(sa)-1 { + buffer.WriteString(", ") + } + } + + buffer.WriteString("]") + + return buffer.String() +} + +// murmur hash +func murmurInit(seed int) int { + return seed +} + +func murmurUpdate(h int, value int) int { + const c1 uint32 = 0xCC9E2D51 + const c2 uint32 = 0x1B873593 + const r1 uint32 = 15 + const r2 uint32 = 13 + const m uint32 = 5 + const n uint32 = 0xE6546B64 + + k := uint32(value) + k *= c1 + k = (k << r1) | (k >> (32 - r1)) + k *= c2 + + hash := uint32(h) ^ k + hash = (hash << r2) | (hash >> (32 - r2)) + hash = hash*m + n + return int(hash) +} + +func murmurFinish(h int, numberOfWords int) int { + var hash = uint32(h) + hash ^= uint32(numberOfWords) << 2 + hash ^= hash >> 16 + hash *= 0x85ebca6b + hash ^= hash >> 13 + hash *= 0xc2b2ae35 + hash ^= hash >> 16 + + return int(hash) +} + +func isDirectory(dir string) (bool, error) { + fileInfo, err := os.Stat(dir) + if err != nil { + switch { + case errors.Is(err, syscall.ENOENT): + // The given directory does not exist, so we will try to create it + // + err = os.MkdirAll(dir, 0755) + if err != nil { + return false, err + } + + return true, nil + case err != nil: + return false, err + default: + } + } + return fileInfo.IsDir(), err +} + +// intSlicesEqual returns true if the two slices of ints are equal, and is a little +// faster than slices.Equal. 
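+//
+// For example (a sketch of the edge cases handled below):
+//
+//	intSlicesEqual([]int{1, 2}, []int{1, 2}) // true
+//	intSlicesEqual(nil, nil)                 // true
+//	intSlicesEqual(nil, []int{})             // false: nil and empty differ here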
+func intSlicesEqual(s1, s2 []int) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if v != s2[i] { + return false + } + } + return true +} + +func pcSliceEqual(s1, s2 []*PredictionContext) bool { + if s1 == nil && s2 == nil { + return true + } + if s1 == nil || s2 == nil { + return false + } + if len(s1) == 0 && len(s2) == 0 { + return true + } + if len(s1) == 0 || len(s2) == 0 || len(s1) != len(s2) { + return false + } + // If the slices are using the same memory, then they are the same slice + if &s1[0] == &s2[0] { + return true + } + for i, v := range s1 { + if !v.Equals(s2[i]) { + return false + } + } + return true +} diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore new file mode 100644 index 00000000..50d95c54 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# IDEs +.idea/ diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE new file mode 100644 index 00000000..89b81799 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Cenk Altı + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md new file mode 100644 index 00000000..9433004a --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/README.md @@ -0,0 +1,30 @@ +# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls] + +This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. + +[Exponential backoff][exponential backoff wiki] +is an algorithm that uses feedback to multiplicatively decrease the rate of some process, +in order to gradually find an acceptable rate. 
+The retries exponentially increase and stop increasing when a certain threshold is met. + +## Usage + +Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end. + +Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation. + +## Contributing + +* I would like to keep this library as small as possible. +* Please don't send a PR without opening an issue and discussing it first. +* If proposed change is not a common use case, I will probably not accept it. + +[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4 +[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png +[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master +[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master + +[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java +[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff + +[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go new file mode 100644 index 00000000..3676ee40 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff. Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. 
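+//
+// A minimal usage sketch (NewConstantBackOff is defined just below):
+//
+//	b := backoff.NewConstantBackOff(500 * time.Millisecond)
+//	d := b.NextBackOff() // always 500ms, never backoff.Stop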
+type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go new file mode 100644 index 00000000..48482330 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/context.go @@ -0,0 +1,62 @@ +package backoff + +import ( + "context" + "time" +) + +// BackOffContext is a backoff policy that stops retrying after the context +// is canceled. +type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 00000000..aac99f19 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,216 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. 
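To make the randomized-interval formula above concrete, here is a small sketch that reproduces the documented example numbers (RetryInterval = 2s, RandomizationFactor = 0.5, Multiplier = 2); the local variables are stand-ins for the fields described above, not package API:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	retryInterval := 2 * time.Second
	randomizationFactor := 0.5
	multiplier := 2.0

	delta := randomizationFactor * float64(retryInterval)
	low := time.Duration(float64(retryInterval) - delta)  // 1s
	high := time.Duration(float64(retryInterval) + delta) // 3s
	fmt.Println(low, high) // the next wait is drawn from [1s, 3s]

	// After the attempt, the retry interval is multiplied, so the
	// following wait is drawn from [2s, 6s], matching the doc above.
	nextInterval := time.Duration(float64(retryInterval) * multiplier) // 4s
	fmt.Println(nextInterval)
}
```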
+*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. +type ExponentialBackOffOpts func(*ExponentialBackOff) + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + for _, fn := range opts { + fn(b) + } + b.Reset() + return b +} + +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. +func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. +func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. 
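Putting the functional options above together, a tuned policy might be built like this (a sketch; the parameter values are arbitrary examples, not recommendations):

```go
package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(200*time.Millisecond),
		backoff.WithMultiplier(2.0),
		backoff.WithMaxInterval(10*time.Second),
		backoff.WithMaxElapsedTime(time.Minute), // give up after ~1 minute overall
	)
	fmt.Println(b.NextBackOff()) // first wait: ~200ms ± 50% (default jitter)
}
```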
+func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). It is +// safe to call even while the backoff policy is used by a running +// ticker. +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go new file mode 100644 index 00000000..b9c0c51c --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -0,0 +1,146 @@ +package backoff + +import ( + "errors" + "time" +) + +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. 
+// o is guaranteed to be run at least once. +// +// If o returns a *PermanentError, the operation is not retried, and the +// wrapped error is returned. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { + return RetryNotify(o, b, nil) +} + +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + res, err = operation() + if err == nil { + return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return res, ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 00000000..df9d68bc --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. 
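The retry helpers above compose with the policy wrappers elsewhere in this file set; a minimal sketch, assuming `fetch` is a hypothetical operation whose authorization failure should not be retried:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

var attempts int

// fetch is a hypothetical operation: transient for two attempts,
// then it fails permanently.
func fetch() error {
	attempts++
	if attempts <= 2 {
		return errors.New("temporarily unavailable") // will be retried
	}
	return backoff.Permanent(errors.New("not authorized")) // stops retrying
}

func main() {
	err := backoff.Retry(fetch, backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 10))
	fmt.Println(err) // prints the wrapped error, not *PermanentError
}
```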
+type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. +// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 00000000..8120d021 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 00000000..28d58ca3 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. 
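The Ticker above offers a channel-based alternative to the Retry helpers; a sketch, again with a hypothetical `doCall` that succeeds on its third attempt:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

var attempts int

// doCall is a hypothetical flaky operation.
func doCall() error {
	attempts++
	if attempts < 3 {
		return errors.New("not ready yet")
	}
	return nil
}

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	for range ticker.C {
		if err := doCall(); err != nil {
			continue // wait for the next backoff-spaced tick
		}
		break // success: stop ticking
	}
	fmt.Println("done after", attempts, "attempts")
}
```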
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+	return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+	delegate BackOff
+	maxTries uint64
+	numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+	if b.maxTries == 0 {
+		return Stop
+	}
+	if b.maxTries > 0 {
+		if b.maxTries <= b.numTries {
+			return Stop
+		}
+		b.numTries++
+	}
+	return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+	b.numTries = 0
+	b.delegate.Reset()
+}
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 97bb1a9d..92b78048 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,10 +1,24 @@
 # Change history of go-restful
-## [v3.11.1] - 2024-01-03
-- remove the dependency on github.com/json-iterator/go (#539)
+## [v3.12.1] - 2024-05-28
-## [v3.11.0] - 2023-08-19
+- fix misroute when dealing multiple webservice with regex (#549) (thanks Haitao Chen)
+
+## [v3.12.0] - 2024-03-11
+
+- add Flush method #529 (#538)
+- fix: Improper handling of empty POST requests (#543)
+
+## [v3.11.3] - 2024-01-09
+
+- better not have 2 tags on one commit
+
+## [v3.11.1, v3.11.2] - 2024-01-09
+
+- fix by restoring custom JSON handler functions (Mike Beaumont #540)
+
+## [v3.11.0] - 2023-08-19
 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index 95a05a08..7234604e 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -2,7 +2,6 @@ go-restful
 ==========
 package for building REST-style Web Services using Google Go
-[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
 [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
 [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
 [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go
index 1ff239f9..80adf55f 100644
--- a/vendor/github.com/emicklei/go-restful/v3/compress.go
+++ b/vendor/github.com/emicklei/go-restful/v3/compress.go
@@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
 	return c.writer.(http.CloseNotifier).CloseNotify()
 }
+// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it.
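The added method below guards the call with a type assertion so flushing degrades to a no-op when the wrapped writer cannot flush. Handler code relies on the same pattern when streaming; a sketch with a hypothetical handler and port:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// stream is a hypothetical handler that pushes periodic updates and
// flushes after each write so clients see them immediately.
func stream(w http.ResponseWriter, r *http.Request) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		if f, ok := w.(http.Flusher); ok {
			f.Flush() // same guarded no-op pattern as the method below
		}
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/stream", stream)
	_ = http.ListenAndServe(":8080", nil)
}
```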
+func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go index ba1fc5d5..6fd2bcd5 100644 --- a/vendor/github.com/emicklei/go-restful/v3/curly.go +++ b/vendor/github.com/emicklei/go-restful/v3/curly.go @@ -46,10 +46,10 @@ func (c CurlyRouter) SelectRoute( // selectRoutes return a collection of Route from a WebService that matches the path tokens from the request. func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) - for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) + for _, eachRoute := range ws.routes { + matches, paramCount, staticCount := c.matchesRouteByPathTokens(eachRoute.pathParts, requestTokens, eachRoute.hasCustomVerb) if matches { - candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? + candidates.add(curlyRoute{eachRoute, paramCount, staticCount}) // TODO make sure Routes() return pointers? } } sort.Sort(candidates) @@ -72,7 +72,7 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] - if routeHasCustomVerb && hasCustomVerb(routeToken){ + if routeHasCustomVerb && hasCustomVerb(routeToken) { if !isMatchCustomVerb(routeToken, requestToken) { return false, 0, 0 } @@ -129,44 +129,52 @@ func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpReques // detectWebService returns the best matching webService given the list of path tokens. // see also computeWebserviceScore func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService { - var best *WebService + var bestWs *WebService score := -1 - for _, each := range webServices { - matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens) + for _, eachWS := range webServices { + matches, eachScore := c.computeWebserviceScore(requestTokens, eachWS.pathExpr.tokens) if matches && (eachScore > score) { - best = each + bestWs = eachWS score = eachScore } } - return best + return bestWs } // computeWebserviceScore returns whether tokens match and // the weighted score of the longest matching consecutive tokens from the beginning. 
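As a worked example of the scoring below, suppose two webservices could serve the request path /users/42 (tokens "", "users", "42"). A service rooted at /users scores 1 for the matching empty tokens plus (2-1)*10 for the static "users" token, for a total of 11. A service rooted at /users/{id:[0-9]+} scores 1 plus (3-1)*10 for "users", plus 1 for the parameter token, plus 1 more because the regex [0-9]+ is assumed to match "42", for a total of 23. The regex-qualified service therefore wins, which is the misrouting fix from #549: static prefixes are weighted by remaining length, parameters add one, and a regex match adds one more.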
-func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) { - if len(tokens) > len(requestTokens) { +func (c CurlyRouter) computeWebserviceScore(requestTokens []string, routeTokens []string) (bool, int) { + if len(routeTokens) > len(requestTokens) { return false, 0 } score := 0 - for i := 0; i < len(tokens); i++ { - each := requestTokens[i] - other := tokens[i] - if len(each) == 0 && len(other) == 0 { + for i := 0; i < len(routeTokens); i++ { + eachRequestToken := requestTokens[i] + eachRouteToken := routeTokens[i] + if len(eachRequestToken) == 0 && len(eachRouteToken) == 0 { score++ continue } - if len(other) > 0 && strings.HasPrefix(other, "{") { + if len(eachRouteToken) > 0 && strings.HasPrefix(eachRouteToken, "{") { // no empty match - if len(each) == 0 { + if len(eachRequestToken) == 0 { return false, score } - score += 1 + score++ + + if colon := strings.Index(eachRouteToken, ":"); colon != -1 { + // match by regex + matchesToken, _ := c.regularMatchesPathToken(eachRouteToken, colon, eachRequestToken) + if matchesToken { + score++ // extra score for regex match + } + } } else { // not a parameter - if each != other { + if eachRequestToken != eachRouteToken { return false, score } - score += (len(tokens) - i) * 10 //fuzzy + score += (len(routeTokens) - i) * 10 //fuzzy } } return true, score diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 8a35c959..9808752a 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -11,6 +11,12 @@ import ( "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. 
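Because `MarshalIndent`, `NewDecoder`, and `NewEncoder` above are now plain package-level variables (the restoration from #540), applications can swap in custom JSON handling. A sketch that wraps the standard encoder; the compact-output choice is purely illustrative:

```go
package main

import (
	"encoding/json"

	restful "github.com/emicklei/go-restful/v3"
)

func init() {
	// Replace pretty-printed responses with compact output application-wide.
	restful.MarshalIndent = func(v interface{}, prefix, indent string) ([]byte, error) {
		return json.Marshal(v) // deliberately ignore prefix/indent
	}
}

func main() {}
```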
@@ -128,7 +134,7 @@ type entityJSONAccess struct { // Read unmarshalls the value from JSON func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) + decoder := NewDecoder(req.Request.Body) decoder.UseNumber() return decoder.Decode(v) } @@ -147,7 +153,7 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er } if resp.prettyPrint { // pretty output must be created and written explicitly - output, err := json.MarshalIndent(v, "", " ") + output, err := MarshalIndent(v, "", " ") if err != nil { return err } @@ -159,5 +165,5 @@ func writeJSON(resp *Response, status int, contentType string, v interface{}) er // not-so-pretty resp.Header().Set(HEADER_ContentType, contentType) resp.WriteHeader(status) - return json.NewEncoder(resp).Encode(v) + return NewEncoder(resp).Encode(v) } diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e..a9b3faaa 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go index d975773d..d970c7cf 100644 --- a/vendor/github.com/go-openapi/jsonpointer/pointer.go +++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go @@ -264,7 +264,7 @@ func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error { knd := reflect.ValueOf(node).Kind() if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array { - return fmt.Errorf("only structs, pointers, maps and slices are supported for setting values") + return errors.New("only structs, pointers, maps and slices are supported for setting values") } if nameProvider == nil { diff --git a/vendor/github.com/go-openapi/swag/BENCHMARK.md b/vendor/github.com/go-openapi/swag/BENCHMARK.md new file mode 100644 index 00000000..e7f28ed6 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/BENCHMARK.md @@ -0,0 +1,52 @@ +# Benchmarks + +## Name mangling utilities + +```bash +go test -bench XXX -run XXX -benchtime 30s +``` + +### Benchmarks at b3e7a5386f996177e4808f11acb2aa93a0f660df + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 862623 44101 ns/op 10450 B/op 732 allocs/op +BenchmarkToXXXName/ToVarName-4 853656 40728 ns/op 10468 B/op 734 allocs/op +BenchmarkToXXXName/ToFileName-4 1268312 27813 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToCommandName-4 1276322 27903 ns/op 9785 B/op 617 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 895334 40354 ns/op 10472 B/op 731 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 882441 40678 ns/op 10566 B/op 749 allocs/op +``` + +### Benchmarks after PR #79 + +~ x10 performance improvement and ~ /100 memory allocations. 
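For context on what is being measured, the benchmarked helpers are the public name-mangling functions; a sketch of their expected outputs, assumed from the initialisms table added elsewhere in this patch:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	fmt.Println(swag.ToGoName("http_server_id"))     // HTTPServerID (assumed)
	fmt.Println(swag.ToFileName("HTTPServerID"))     // http_server_id (assumed)
	fmt.Println(swag.ToHumanNameLower("HTTPServer")) // HTTP server (assumed)
}
```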
+ +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz +BenchmarkToXXXName/ToGoName-4 9595830 3991 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-4 9194276 3984 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-4 17002711 2123 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-4 16772926 2111 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-4 9788331 3749 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-4 9188260 3941 ns/op 104 B/op 6 allocs/op +``` + +``` +goos: linux +goarch: amd64 +pkg: github.com/go-openapi/swag +cpu: AMD Ryzen 7 5800X 8-Core Processor +BenchmarkToXXXName/ToGoName-16 18527378 1972 ns/op 42 B/op 5 allocs/op +BenchmarkToXXXName/ToVarName-16 15552692 2093 ns/op 62 B/op 7 allocs/op +BenchmarkToXXXName/ToFileName-16 32161176 1117 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToCommandName-16 32256634 1137 ns/op 147 B/op 7 allocs/op +BenchmarkToXXXName/ToHumanNameLower-16 18599661 1946 ns/op 92 B/op 6 allocs/op +BenchmarkToXXXName/ToHumanNameTitle-16 17581353 2054 ns/op 105 B/op 6 allocs/op +``` diff --git a/vendor/github.com/go-openapi/swag/initialism_index.go b/vendor/github.com/go-openapi/swag/initialism_index.go index 03555184..20a359bb 100644 --- a/vendor/github.com/go-openapi/swag/initialism_index.go +++ b/vendor/github.com/go-openapi/swag/initialism_index.go @@ -16,9 +16,130 @@ package swag import ( "sort" + "strings" "sync" ) +var ( + // commonInitialisms are common acronyms that are kept as whole uppercased words. + commonInitialisms *indexOfInitialisms + + // initialisms is a slice of sorted initialisms + initialisms []string + + // a copy of initialisms pre-baked as []rune + initialismsRunes [][]rune + initialismsUpperCased [][]rune + + isInitialism func(string) bool + + maxAllocMatches int +) + +func init() { + // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 + configuredInitialisms := map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTPS": true, + "HTTP": true, + "ID": true, + "IP": true, + "IPv4": true, + "IPv6": true, + "JSON": true, + "LHS": true, + "OAI": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, + } + + // a thread-safe index of initialisms + commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) + maxAllocMatches = maxAllocHeuristic(initialismsRunes) + + // a test function + isInitialism = commonInitialisms.isInitialism +} + +func asRunes(in []string) [][]rune { + out := make([][]rune, len(in)) + for i, initialism := range in { + out[i] = []rune(initialism) + } + + return out +} + +func asUpperCased(in []string) [][]rune { + out := make([][]rune, len(in)) + + for i, initialism := range in { + out[i] = []rune(upper(trim(initialism))) + } + + return out +} + +func maxAllocHeuristic(in [][]rune) int { + heuristic := make(map[rune]int) + for _, initialism := range in { + heuristic[initialism[0]]++ + } + + var maxAlloc int 
+ for _, val := range heuristic { + if val > maxAlloc { + maxAlloc = val + } + } + + return maxAlloc +} + +// AddInitialisms add additional initialisms +func AddInitialisms(words ...string) { + for _, word := range words { + // commonInitialisms[upper(word)] = true + commonInitialisms.add(upper(word)) + } + // sort again + initialisms = commonInitialisms.sorted() + initialismsRunes = asRunes(initialisms) + initialismsUpperCased = asUpperCased(initialisms) +} + // indexOfInitialisms is a thread-safe implementation of the sorted index of initialisms. // Since go1.9, this may be implemented with sync.Map. type indexOfInitialisms struct { @@ -55,7 +176,7 @@ func (m *indexOfInitialisms) add(key string) *indexOfInitialisms { func (m *indexOfInitialisms) sorted() (result []string) { m.sortMutex.Lock() defer m.sortMutex.Unlock() - m.index.Range(func(key, value interface{}) bool { + m.index.Range(func(key, _ interface{}) bool { k := key.(string) result = append(result, k) return true @@ -63,3 +184,19 @@ func (m *indexOfInitialisms) sorted() (result []string) { sort.Sort(sort.Reverse(byInitialism(result))) return } + +type byInitialism []string + +func (s byInitialism) Len() int { + return len(s) +} +func (s byInitialism) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s byInitialism) Less(i, j int) bool { + if len(s[i]) != len(s[j]) { + return len(s[i]) < len(s[j]) + } + + return strings.Compare(s[i], s[j]) > 0 +} diff --git a/vendor/github.com/go-openapi/swag/name_lexem.go b/vendor/github.com/go-openapi/swag/name_lexem.go index aa7f6a9b..8bb64ac3 100644 --- a/vendor/github.com/go-openapi/swag/name_lexem.go +++ b/vendor/github.com/go-openapi/swag/name_lexem.go @@ -14,74 +14,80 @@ package swag -import "unicode" +import ( + "unicode" + "unicode/utf8" +) type ( - nameLexem interface { - GetUnsafeGoName() string - GetOriginal() string - IsInitialism() bool - } + lexemKind uint8 - initialismNameLexem struct { + nameLexem struct { original string matchedInitialism string + kind lexemKind } +) - casualNameLexem struct { - original string - } +const ( + lexemKindCasualName lexemKind = iota + lexemKindInitialismName ) -func newInitialismNameLexem(original, matchedInitialism string) *initialismNameLexem { - return &initialismNameLexem{ +func newInitialismNameLexem(original, matchedInitialism string) nameLexem { + return nameLexem{ + kind: lexemKindInitialismName, original: original, matchedInitialism: matchedInitialism, } } -func newCasualNameLexem(original string) *casualNameLexem { - return &casualNameLexem{ +func newCasualNameLexem(original string) nameLexem { + return nameLexem{ + kind: lexemKindCasualName, original: original, } } -func (l *initialismNameLexem) GetUnsafeGoName() string { - return l.matchedInitialism -} +func (l nameLexem) GetUnsafeGoName() string { + if l.kind == lexemKindInitialismName { + return l.matchedInitialism + } + + var ( + first rune + rest string + ) -func (l *casualNameLexem) GetUnsafeGoName() string { - var first rune - var rest string for i, orig := range l.original { if i == 0 { first = orig continue } + if i > 0 { rest = l.original[i:] break } } + if len(l.original) > 1 { - return string(unicode.ToUpper(first)) + lower(rest) + b := poolOfBuffers.BorrowBuffer(utf8.UTFMax + len(rest)) + defer func() { + poolOfBuffers.RedeemBuffer(b) + }() + b.WriteRune(unicode.ToUpper(first)) + b.WriteString(lower(rest)) + return b.String() } return l.original } -func (l *initialismNameLexem) GetOriginal() string { +func (l nameLexem) GetOriginal() string { return l.original } -func (l 
*casualNameLexem) GetOriginal() string { - return l.original -} - -func (l *initialismNameLexem) IsInitialism() bool { - return true -} - -func (l *casualNameLexem) IsInitialism() bool { - return false +func (l nameLexem) IsInitialism() bool { + return l.kind == lexemKindInitialismName } diff --git a/vendor/github.com/go-openapi/swag/split.go b/vendor/github.com/go-openapi/swag/split.go index a1825fb7..274727a8 100644 --- a/vendor/github.com/go-openapi/swag/split.go +++ b/vendor/github.com/go-openapi/swag/split.go @@ -15,124 +15,269 @@ package swag import ( + "bytes" + "sync" "unicode" + "unicode/utf8" ) -var nameReplaceTable = map[rune]string{ - '@': "At ", - '&': "And ", - '|': "Pipe ", - '$': "Dollar ", - '!': "Bang ", - '-': "", - '_': "", -} - type ( splitter struct { - postSplitInitialismCheck bool initialisms []string + initialismsRunes [][]rune + initialismsUpperCased [][]rune // initialisms cached in their trimmed, upper-cased version + postSplitInitialismCheck bool + } + + splitterOption func(*splitter) + + initialismMatch struct { + body []rune + start, end int + complete bool + } + initialismMatches []initialismMatch +) + +type ( + // memory pools of temporary objects. + // + // These are used to recycle temporarily allocated objects + // and relieve the GC from undue pressure. + + matchesPool struct { + *sync.Pool } - splitterOption func(*splitter) *splitter + buffersPool struct { + *sync.Pool + } + + lexemsPool struct { + *sync.Pool + } + + splittersPool struct { + *sync.Pool + } ) -// split calls the splitter; splitter provides more control and post options +var ( + // poolOfMatches holds temporary slices for recycling during the initialism match process + poolOfMatches = matchesPool{ + Pool: &sync.Pool{ + New: func() any { + s := make(initialismMatches, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfBuffers = buffersPool{ + Pool: &sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } + + poolOfLexems = lexemsPool{ + Pool: &sync.Pool{ + New: func() any { + s := make([]nameLexem, 0, maxAllocMatches) + + return &s + }, + }, + } + + poolOfSplitters = splittersPool{ + Pool: &sync.Pool{ + New: func() any { + s := newSplitter() + + return &s + }, + }, + } +) + +// nameReplaceTable finds a word representation for special characters. +func nameReplaceTable(r rune) (string, bool) { + switch r { + case '@': + return "At ", true + case '&': + return "And ", true + case '|': + return "Pipe ", true + case '$': + return "Dollar ", true + case '!': + return "Bang ", true + case '-': + return "", true + case '_': + return "", true + default: + return "", false + } +} + +// split calls the splitter. 
+// +// Use newSplitter for more control and options func split(str string) []string { - lexems := newSplitter().split(str) - result := make([]string, 0, len(lexems)) + s := poolOfSplitters.BorrowSplitter() + lexems := s.split(str) + result := make([]string, 0, len(*lexems)) - for _, lexem := range lexems { + for _, lexem := range *lexems { result = append(result, lexem.GetOriginal()) } + poolOfLexems.RedeemLexems(lexems) + poolOfSplitters.RedeemSplitter(s) return result } -func (s *splitter) split(str string) []nameLexem { - return s.toNameLexems(str) -} - -func newSplitter(options ...splitterOption) *splitter { - splitter := &splitter{ +func newSplitter(options ...splitterOption) splitter { + s := splitter{ postSplitInitialismCheck: false, initialisms: initialisms, + initialismsRunes: initialismsRunes, + initialismsUpperCased: initialismsUpperCased, } for _, option := range options { - splitter = option(splitter) + option(&s) } - return splitter + return s } // withPostSplitInitialismCheck allows to catch initialisms after main split process -func withPostSplitInitialismCheck(s *splitter) *splitter { +func withPostSplitInitialismCheck(s *splitter) { s.postSplitInitialismCheck = true +} + +func (p matchesPool) BorrowMatches() *initialismMatches { + s := p.Get().(*initialismMatches) + *s = (*s)[:0] // reset slice, keep allocated capacity + return s } -type ( - initialismMatch struct { - start, end int - body []rune - complete bool +func (p buffersPool) BorrowBuffer(size int) *bytes.Buffer { + s := p.Get().(*bytes.Buffer) + s.Reset() + + if s.Cap() < size { + s.Grow(size) } - initialismMatches []*initialismMatch -) -func (s *splitter) toNameLexems(name string) []nameLexem { + return s +} + +func (p lexemsPool) BorrowLexems() *[]nameLexem { + s := p.Get().(*[]nameLexem) + *s = (*s)[:0] // reset slice, keep allocated capacity + + return s +} + +func (p splittersPool) BorrowSplitter(options ...splitterOption) *splitter { + s := p.Get().(*splitter) + s.postSplitInitialismCheck = false // reset options + for _, apply := range options { + apply(s) + } + + return s +} + +func (p matchesPool) RedeemMatches(s *initialismMatches) { + p.Put(s) +} + +func (p buffersPool) RedeemBuffer(s *bytes.Buffer) { + p.Put(s) +} + +func (p lexemsPool) RedeemLexems(s *[]nameLexem) { + p.Put(s) +} + +func (p splittersPool) RedeemSplitter(s *splitter) { + p.Put(s) +} + +func (m initialismMatch) isZero() bool { + return m.start == 0 && m.end == 0 +} + +func (s splitter) split(name string) *[]nameLexem { nameRunes := []rune(name) matches := s.gatherInitialismMatches(nameRunes) + if matches == nil { + return poolOfLexems.BorrowLexems() + } + return s.mapMatchesToNameLexems(nameRunes, matches) } -func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { - matches := make(initialismMatches, 0) +func (s splitter) gatherInitialismMatches(nameRunes []rune) *initialismMatches { + var matches *initialismMatches for currentRunePosition, currentRune := range nameRunes { - newMatches := make(initialismMatches, 0, len(matches)) + // recycle these allocations as we loop over runes + // with such recycling, only 2 slices should be allocated per call + // instead of o(n). 
+ newMatches := poolOfMatches.BorrowMatches() // check current initialism matches - for _, match := range matches { - if keepCompleteMatch := match.complete; keepCompleteMatch { - newMatches = append(newMatches, match) - continue - } + if matches != nil { // skip first iteration + for _, match := range *matches { + if keepCompleteMatch := match.complete; keepCompleteMatch { + *newMatches = append(*newMatches, match) + continue + } - // drop failed match - currentMatchRune := match.body[currentRunePosition-match.start] - if !s.initialismRuneEqual(currentMatchRune, currentRune) { - continue - } + // drop failed match + currentMatchRune := match.body[currentRunePosition-match.start] + if currentMatchRune != currentRune { + continue + } - // try to complete ongoing match - if currentRunePosition-match.start == len(match.body)-1 { - // we are close; the next step is to check the symbol ahead - // if it is a small letter, then it is not the end of match - // but beginning of the next word - - if currentRunePosition < len(nameRunes)-1 { - nextRune := nameRunes[currentRunePosition+1] - if newWord := unicode.IsLower(nextRune); newWord { - // oh ok, it was the start of a new word - continue + // try to complete ongoing match + if currentRunePosition-match.start == len(match.body)-1 { + // we are close; the next step is to check the symbol ahead + // if it is a small letter, then it is not the end of match + // but beginning of the next word + + if currentRunePosition < len(nameRunes)-1 { + nextRune := nameRunes[currentRunePosition+1] + if newWord := unicode.IsLower(nextRune); newWord { + // oh ok, it was the start of a new word + continue + } } + + match.complete = true + match.end = currentRunePosition } - match.complete = true - match.end = currentRunePosition + *newMatches = append(*newMatches, match) } - - newMatches = append(newMatches, match) } // check for new initialism matches - for _, initialism := range s.initialisms { - initialismRunes := []rune(initialism) - if s.initialismRuneEqual(initialismRunes[0], currentRune) { - newMatches = append(newMatches, &initialismMatch{ + for i := range s.initialisms { + initialismRunes := s.initialismsRunes[i] + if initialismRunes[0] == currentRune { + *newMatches = append(*newMatches, initialismMatch{ start: currentRunePosition, body: initialismRunes, complete: false, @@ -140,24 +285,28 @@ func (s *splitter) gatherInitialismMatches(nameRunes []rune) initialismMatches { } } + if matches != nil { + poolOfMatches.RedeemMatches(matches) + } matches = newMatches } + // up to the caller to redeem this last slice return matches } -func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMatches) []nameLexem { - nameLexems := make([]nameLexem, 0) +func (s splitter) mapMatchesToNameLexems(nameRunes []rune, matches *initialismMatches) *[]nameLexem { + nameLexems := poolOfLexems.BorrowLexems() - var lastAcceptedMatch *initialismMatch - for _, match := range matches { + var lastAcceptedMatch initialismMatch + for _, match := range *matches { if !match.complete { continue } - if firstMatch := lastAcceptedMatch == nil; firstMatch { - nameLexems = append(nameLexems, s.breakCasualString(nameRunes[:match.start])...) 
- nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + if firstMatch := lastAcceptedMatch.isZero(); firstMatch { + s.appendBrokenDownCasualString(nameLexems, nameRunes[:match.start]) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match @@ -169,63 +318,66 @@ func (s *splitter) mapMatchesToNameLexems(nameRunes []rune, matches initialismMa } middle := nameRunes[lastAcceptedMatch.end+1 : match.start] - nameLexems = append(nameLexems, s.breakCasualString(middle)...) - nameLexems = append(nameLexems, s.breakInitialism(string(match.body))) + s.appendBrokenDownCasualString(nameLexems, middle) + *nameLexems = append(*nameLexems, s.breakInitialism(string(match.body))) lastAcceptedMatch = match } // we have not found any accepted matches - if lastAcceptedMatch == nil { - return s.breakCasualString(nameRunes) - } - - if lastAcceptedMatch.end+1 != len(nameRunes) { + if lastAcceptedMatch.isZero() { + *nameLexems = (*nameLexems)[:0] + s.appendBrokenDownCasualString(nameLexems, nameRunes) + } else if lastAcceptedMatch.end+1 != len(nameRunes) { rest := nameRunes[lastAcceptedMatch.end+1:] - nameLexems = append(nameLexems, s.breakCasualString(rest)...) + s.appendBrokenDownCasualString(nameLexems, rest) } - return nameLexems -} + poolOfMatches.RedeemMatches(matches) -func (s *splitter) initialismRuneEqual(a, b rune) bool { - return a == b + return nameLexems } -func (s *splitter) breakInitialism(original string) nameLexem { +func (s splitter) breakInitialism(original string) nameLexem { return newInitialismNameLexem(original, original) } -func (s *splitter) breakCasualString(str []rune) []nameLexem { - segments := make([]nameLexem, 0) - currentSegment := "" +func (s splitter) appendBrokenDownCasualString(segments *[]nameLexem, str []rune) { + currentSegment := poolOfBuffers.BorrowBuffer(len(str)) // unlike strings.Builder, bytes.Buffer initial storage can reused + defer func() { + poolOfBuffers.RedeemBuffer(currentSegment) + }() addCasualNameLexem := func(original string) { - segments = append(segments, newCasualNameLexem(original)) + *segments = append(*segments, newCasualNameLexem(original)) } addInitialismNameLexem := func(original, match string) { - segments = append(segments, newInitialismNameLexem(original, match)) + *segments = append(*segments, newInitialismNameLexem(original, match)) } - addNameLexem := func(original string) { - if s.postSplitInitialismCheck { - for _, initialism := range s.initialisms { - if upper(initialism) == upper(original) { - addInitialismNameLexem(original, initialism) + var addNameLexem func(string) + if s.postSplitInitialismCheck { + addNameLexem = func(original string) { + for i := range s.initialisms { + if isEqualFoldIgnoreSpace(s.initialismsUpperCased[i], original) { + addInitialismNameLexem(original, s.initialisms[i]) + return } } - } - addCasualNameLexem(original) + addCasualNameLexem(original) + } + } else { + addNameLexem = addCasualNameLexem } - for _, rn := range string(str) { - if replace, found := nameReplaceTable[rn]; found { - if currentSegment != "" { - addNameLexem(currentSegment) - currentSegment = "" + for _, rn := range str { + if replace, found := nameReplaceTable(rn); found { + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } if replace != "" { @@ -236,27 +388,121 @@ func (s *splitter) breakCasualString(str []rune) []nameLexem { } if !unicode.In(rn, unicode.L, unicode.M, unicode.N, unicode.Pc) { - if currentSegment != "" { - 
addNameLexem(currentSegment) - currentSegment = "" + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) + currentSegment.Reset() } continue } if unicode.IsUpper(rn) { - if currentSegment != "" { - addNameLexem(currentSegment) + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } - currentSegment = "" + currentSegment.Reset() } - currentSegment += string(rn) + currentSegment.WriteRune(rn) + } + + if currentSegment.Len() > 0 { + addNameLexem(currentSegment.String()) } +} + +// isEqualFoldIgnoreSpace is the same as strings.EqualFold, but +// it ignores leading and trailing blank spaces in the compared +// string. +// +// base is assumed to be composed of upper-cased runes, and be already +// trimmed. +// +// This code is heavily inspired from strings.EqualFold. +func isEqualFoldIgnoreSpace(base []rune, str string) bool { + var i, baseIndex int + // equivalent to b := []byte(str), but without data copy + b := hackStringBytes(str) + + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + break + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + break + } + i += size + } + + if i >= len(b) { + return len(base) == 0 + } + + for _, baseRune := range base { + if i >= len(b) { + break + } + + if c := b[i]; c < utf8.RuneSelf { + // single byte rune case (ASCII) + if baseRune >= utf8.RuneSelf { + return false + } + + baseChar := byte(baseRune) + if c != baseChar && + !('a' <= c && c <= 'z' && c-'a'+'A' == baseChar) { + return false + } + + baseIndex++ + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if unicode.ToUpper(r) != baseRune { + return false + } + baseIndex++ + i += size + } + + if baseIndex != len(base) { + return false + } + + // all passed: now we should only have blanks + for i < len(b) { + if c := b[i]; c < utf8.RuneSelf { + // fast path for ASCII + if c != ' ' && c != '\t' { + return false + } + i++ + + continue + } + + // unicode case + r, size := utf8.DecodeRune(b[i:]) + if !unicode.IsSpace(r) { + return false + } - if currentSegment != "" { - addNameLexem(currentSegment) + i += size } - return segments + return true } diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go new file mode 100644 index 00000000..90745d5c --- /dev/null +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -0,0 +1,8 @@ +package swag + +import "unsafe" + +// hackStringBytes returns the (unsafe) underlying bytes slice of a string. +func hackStringBytes(str string) []byte { + return unsafe.Slice(unsafe.StringData(str), len(str)) +} diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 0413f744..5051401c 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -18,76 +18,25 @@ import ( "reflect" "strings" "unicode" + "unicode/utf8" ) -// commonInitialisms are common acronyms that are kept as whole uppercased words. -var commonInitialisms *indexOfInitialisms - -// initialisms is a slice of sorted initialisms -var initialisms []string - -var isInitialism func(string) bool - // GoNamePrefixFunc sets an optional rule to prefix go names // which do not start with a letter. // +// The prefix function is assumed to return a string that starts with an upper case letter. +// // e.g. 
to help convert "123" into "{prefix}123" // // The default is to prefix with "X" var GoNamePrefixFunc func(string) string -func init() { - // Taken from https://github.com/golang/lint/blob/3390df4df2787994aea98de825b964ac7944b817/lint.go#L732-L769 - var configuredInitialisms = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTPS": true, - "HTTP": true, - "ID": true, - "IP": true, - "IPv4": true, - "IPv6": true, - "JSON": true, - "LHS": true, - "OAI": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - "XSRF": true, - "XSS": true, +func prefixFunc(name, in string) string { + if GoNamePrefixFunc == nil { + return "X" + in } - // a thread-safe index of initialisms - commonInitialisms = newIndexOfInitialisms().load(configuredInitialisms) - initialisms = commonInitialisms.sorted() - - // a test function - isInitialism = commonInitialisms.isInitialism + return GoNamePrefixFunc(name) + in } const ( @@ -156,22 +105,6 @@ func SplitByFormat(data, format string) []string { return result } -type byInitialism []string - -func (s byInitialism) Len() int { - return len(s) -} -func (s byInitialism) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} -func (s byInitialism) Less(i, j int) bool { - if len(s[i]) != len(s[j]) { - return len(s[i]) < len(s[j]) - } - - return strings.Compare(s[i], s[j]) > 0 -} - // Removes leading whitespaces func trim(str string) string { return strings.TrimSpace(str) @@ -188,15 +121,20 @@ func lower(str string) string { } // Camelize an uppercased word -func Camelize(word string) (camelized string) { +func Camelize(word string) string { + camelized := poolOfBuffers.BorrowBuffer(len(word)) + defer func() { + poolOfBuffers.RedeemBuffer(camelized) + }() + for pos, ru := range []rune(word) { if pos > 0 { - camelized += string(unicode.ToLower(ru)) + camelized.WriteRune(unicode.ToLower(ru)) } else { - camelized += string(unicode.ToUpper(ru)) + camelized.WriteRune(unicode.ToUpper(ru)) } } - return + return camelized.String() } // ToFileName lowercases and underscores a go type name @@ -224,26 +162,31 @@ func ToCommandName(name string) string { // ToHumanNameLower represents a code name as a human series of words func ToHumanNameLower(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) - out := make([]string, 0, len(in)) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) + out := make([]string, 0, len(*in)) - for _, w := range in { + for _, w := range *in { if !w.IsInitialism() { out = append(out, lower(w.GetOriginal())) } else { out = append(out, trim(w.GetOriginal())) } } + poolOfLexems.RedeemLexems(in) return strings.Join(out, " ") } // ToHumanNameTitle represents a code name as a human series of words with the first letters titleized func ToHumanNameTitle(name string) string { - in := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + in := s.split(name) + poolOfSplitters.RedeemSplitter(s) - out := make([]string, 0, len(in)) - for _, w := range in { + out := make([]string, 0, len(*in)) + for _, w := range *in { original := 
trim(w.GetOriginal()) if !w.IsInitialism() { out = append(out, Camelize(original)) @@ -251,6 +194,8 @@ func ToHumanNameTitle(name string) string { out = append(out, original) } } + poolOfLexems.RedeemLexems(in) + return strings.Join(out, " ") } @@ -283,35 +228,70 @@ func ToVarName(name string) string { // ToGoName translates a swagger name which can be underscored or camel cased to a name that golint likes func ToGoName(name string) string { - lexems := newSplitter(withPostSplitInitialismCheck).split(name) + s := poolOfSplitters.BorrowSplitter(withPostSplitInitialismCheck) + lexems := s.split(name) + poolOfSplitters.RedeemSplitter(s) + defer func() { + poolOfLexems.RedeemLexems(lexems) + }() + lexemes := *lexems + + if len(lexemes) == 0 { + return "" + } + + result := poolOfBuffers.BorrowBuffer(len(name)) + defer func() { + poolOfBuffers.RedeemBuffer(result) + }() + + // check if not starting with a letter, upper case + firstPart := lexemes[0].GetUnsafeGoName() + if lexemes[0].IsInitialism() { + firstPart = upper(firstPart) + } + + if c := firstPart[0]; c < utf8.RuneSelf { + // ASCII + switch { + case 'A' <= c && c <= 'Z': + result.WriteString(firstPart) + case 'a' <= c && c <= 'z': + result.WriteByte(c - 'a' + 'A') + result.WriteString(firstPart[1:]) + default: + result.WriteString(prefixFunc(name, firstPart)) + // NOTE: no longer check if prefixFunc returns a string that starts with uppercase: + // assume this is always the case + } + } else { + // unicode + firstRune, _ := utf8.DecodeRuneInString(firstPart) + switch { + case !unicode.IsLetter(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + case !unicode.IsUpper(firstRune): + result.WriteString(prefixFunc(name, firstPart)) + /* + result.WriteRune(unicode.ToUpper(firstRune)) + result.WriteString(firstPart[offset:]) + */ + default: + result.WriteString(firstPart) + } + } - result := "" - for _, lexem := range lexems { + for _, lexem := range lexemes[1:] { goName := lexem.GetUnsafeGoName() // to support old behavior if lexem.IsInitialism() { goName = upper(goName) } - result += goName + result.WriteString(goName) } - if len(result) > 0 { - // Only prefix with X when the first character isn't an ascii letter - first := []rune(result)[0] - if !unicode.IsLetter(first) || (first > unicode.MaxASCII && !unicode.IsUpper(first)) { - if GoNamePrefixFunc == nil { - return "X" + result - } - result = GoNamePrefixFunc(name) + result - } - first = []rune(result)[0] - if unicode.IsLetter(first) && !unicode.IsUpper(first) { - result = string(append([]rune{unicode.ToUpper(first)}, []rune(result)[1:]...)) - } - } - - return result + return result.String() } // ContainsStrings searches a slice of strings for a case-sensitive match @@ -376,16 +356,6 @@ func IsZero(data interface{}) bool { } } -// AddInitialisms add additional initialisms -func AddInitialisms(words ...string) { - for _, word := range words { - // commonInitialisms[upper(word)] = true - commonInitialisms.add(upper(word)) - } - // sort again - initialisms = commonInitialisms.sorted() -} - // CommandLineOptionsGroup represents a group of user-defined command line options type CommandLineOptionsGroup struct { ShortDescription string diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index a8c4e359..f59e0259 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -16,6 +16,7 @@ package swag import ( "encoding/json" + "errors" "fmt" "path/filepath" "reflect" @@ -50,7 +51,7 @@ func 
BytesToYAMLDoc(data []byte) (interface{}, error) { return nil, err } if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { - return nil, fmt.Errorf("only YAML documents that are objects are supported") + return nil, errors.New("only YAML documents that are objects are supported") } return &document, nil } diff --git a/vendor/github.com/google/cel-go/LICENSE b/vendor/github.com/google/cel-go/LICENSE new file mode 100644 index 00000000..2493ed2e --- /dev/null +++ b/vendor/github.com/google/cel-go/LICENSE @@ -0,0 +1,233 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +=========================================================================== +The common/types/pb/equal.go modification of proto.Equal logic +=========================================================================== +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
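The diffs below vendor github.com/google/cel-go, the Common Expression Language runtime pulled in by this patch's dependency updates. As orientation before the vendored sources, here is a minimal sketch of the library's core flow (declare an environment, compile an expression, plan a program, evaluate it); the variable name and expression are illustrative only and are not part of the patch:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare an environment with a single string variable.
	env, err := cel.NewEnv(cel.Variable("user", cel.StringType))
	if err != nil {
		panic(err)
	}
	// Parse and type-check an expression against the environment.
	ast, iss := env.Compile(`"Hello, " + user + "!"`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Plan an evaluable program and run it with a variable binding.
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"user": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // Hello, world!
}

The Env, Ast, and Program types used here are defined in the cel/ files that follow.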
diff --git a/vendor/github.com/google/cel-go/cel/BUILD.bazel b/vendor/github.com/google/cel-go/cel/BUILD.bazel new file mode 100644 index 00000000..6e2fc073 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/BUILD.bazel @@ -0,0 +1,91 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "cel.go", + "decls.go", + "env.go", + "folding.go", + "io.go", + "inlining.go", + "library.go", + "macro.go", + "optimizer.go", + "options.go", + "program.go", + "validator.go", + ], + importpath = "github.com/google/cel-go/cel", + visibility = ["//visibility:public"], + deps = [ + "//checker:go_default_library", + "//checker/decls:go_default_library", + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/containers:go_default_library", + "//common/decls:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//common/types/pb:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "//interpreter:go_default_library", + "//parser:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protodesc:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//reflect/protoregistry:go_default_library", + "@org_golang_google_protobuf//types/descriptorpb:go_default_library", + "@org_golang_google_protobuf//types/dynamicpb:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "cel_example_test.go", + "cel_test.go", + "decls_test.go", + "env_test.go", + "folding_test.go", + "io_test.go", + "inlining_test.go", + "optimizer_test.go", + "validator_test.go", + ], + data = [ + "//cel/testdata:gen_test_fds", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "//ext:go_default_library", + "//test:go_default_library", + "//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", + "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/cel/cel.go b/vendor/github.com/google/cel-go/cel/cel.go new file mode 100644 index 00000000..eb5a9f4c --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/cel.go @@ -0,0 +1,19 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file 
except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cel defines the top-level interface for the Common Expression Language (CEL).
+//
+// CEL is a non-Turing complete expression language designed to parse, check, and evaluate
+// expressions against user-defined environments.
+package cel
diff --git a/vendor/github.com/google/cel-go/cel/decls.go b/vendor/github.com/google/cel-go/cel/decls.go
new file mode 100644
index 00000000..b59e3708
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/decls.go
@@ -0,0 +1,355 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/decls"
+	"github.com/google/cel-go/common/functions"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind which is used to differentiate quickly between simple and complex types.
+type Kind = types.Kind
+
+const (
+	// DynKind represents a dynamic type. This kind only exists at type-check time.
+	DynKind Kind = types.DynKind
+
+	// AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+	AnyKind = types.AnyKind
+
+	// BoolKind represents a boolean type.
+	BoolKind = types.BoolKind
+
+	// BytesKind represents a bytes type.
+	BytesKind = types.BytesKind
+
+	// DoubleKind represents a double type.
+	DoubleKind = types.DoubleKind
+
+	// DurationKind represents a CEL duration type.
+	DurationKind = types.DurationKind
+
+	// IntKind represents an integer type.
+	IntKind = types.IntKind
+
+	// ListKind represents a list type.
+	ListKind = types.ListKind
+
+	// MapKind represents a map type.
+	MapKind = types.MapKind
+
+	// NullTypeKind represents a null type.
+	NullTypeKind = types.NullTypeKind
+
+	// OpaqueKind represents an abstract type which has no accessible fields.
+	OpaqueKind = types.OpaqueKind
+
+	// StringKind represents a string type.
+	StringKind = types.StringKind
+
+	// StructKind represents a structured object with typed fields.
+	StructKind = types.StructKind
+
+	// TimestampKind represents a CEL time type.
+	TimestampKind = types.TimestampKind
+
+	// TypeKind represents the CEL type.
+	TypeKind = types.TypeKind
+
+	// TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+	TypeParamKind = types.TypeParamKind
+
+	// UintKind represents a uint type.
+	UintKind = types.UintKind
+)
+
+var (
+	// AnyType represents the google.protobuf.Any type.
+	AnyType = types.AnyType
+	// BoolType represents the bool type.
+	BoolType = types.BoolType
+	// BytesType represents the bytes type.
+	BytesType = types.BytesType
+	// DoubleType represents the double type.
+	DoubleType = types.DoubleType
+	// DurationType represents the CEL duration type.
+	DurationType = types.DurationType
+	// DynType represents a dynamic CEL type whose type will be determined at runtime from context.
+	DynType = types.DynType
+	// IntType represents the int type.
+	IntType = types.IntType
+	// NullType represents the type of a null value.
+	NullType = types.NullType
+	// StringType represents the string type.
+	StringType = types.StringType
+	// TimestampType represents the time type.
+	TimestampType = types.TimestampType
+	// TypeType represents a CEL type.
+	TypeType = types.TypeType
+	// UintType represents a uint type.
+	UintType = types.UintType
+
+	// function references for instantiating new types.
+
+	// ListType creates an instance of a list type value with the provided element type.
+	ListType = types.NewListType
+	// MapType creates an instance of a map type value with the provided key and value types.
+	MapType = types.NewMapType
+	// NullableType creates an instance of a nullable type with the provided wrapped type.
+	//
+	// Note: only primitive types are supported as wrapped types.
+	NullableType = types.NewNullableType
+	// OptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+	OptionalType = types.NewOptionalType
+	// OpaqueType creates an abstract parameterized type with a given name.
+	OpaqueType = types.NewOpaqueType
+	// ObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+	ObjectType = types.NewObjectType
+	// TypeParamType creates a parameterized type instance.
+	TypeParamType = types.NewTypeParamType
+)
+
+// Type holds a reference to a runtime type with an optional type-checked set of type parameters.
+type Type = types.Type
+
+// Constant creates an instance of an identifier declaration with a variable name, type, and value.
+func Constant(name string, t *Type, v ref.Val) EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.variables = append(e.variables, decls.NewConstant(name, t, v))
+		return e, nil
+	}
+}
+
+// Variable creates an instance of a variable declaration with a variable name and type.
+func Variable(name string, t *Type) EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.variables = append(e.variables, decls.NewVariable(name, t))
+		return e, nil
+	}
+}
+
+// Function defines a function and overloads with optional singleton or per-overload bindings.
+//
+// Using Function is roughly equivalent to calling Declarations() to declare the function signatures
+// and Functions() to define the function bindings, if they have been defined. Specifying the
+// same function name more than once will result in the aggregation of the function overloads. If any
+// signatures conflict between the existing and new function definition an error will be raised.
+// However, if the signatures are identical and the overload ids are the same, the redefinition will
+// be considered a no-op.
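To make the Constant and Variable options above concrete, a short sketch; the identifier names and types are illustrative:

package main

import (
	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
)

func main() {
	// Declare one variable and one constant in the environment.
	env, err := cel.NewEnv(
		cel.Variable("request_size", cel.IntType),
		cel.Constant("max_size", cel.IntType, types.Int(1024)),
	)
	if err != nil {
		panic(err)
	}
	// Both identifiers are now visible to the type-checker.
	ast, iss := env.Compile(`request_size <= max_size`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	_ = ast // plan with env.Program(ast) as usual
}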
+// +// One key difference with using Function() is that each FunctionDecl provided will handle dynamic +// dispatch based on the type-signatures of the overloads provided which means overload resolution at +// runtime is handled out of the box rather than via a custom binding for overload resolution via +// Functions(): +// +// - Overloads are searched in the order they are declared +// - Dynamic dispatch for lists and maps is limited by inspection of the list and map contents +// +// at runtime. Empty lists and maps will result in a 'default dispatch' +// +// - In the event that a default dispatch occurs, the first overload provided is the one invoked +// +// If you intend to use overloads which differentiate based on the key or element type of a list or +// map, consider using a generic function instead: e.g. func(list(T)) or func(map(K, V)) as this +// will allow your implementation to determine how best to handle dispatch and the default behavior +// for empty lists and maps whose contents cannot be inspected. +// +// For functions which use parameterized opaque types (abstract types), consider using a singleton +// function which is capable of inspecting the contents of the type and resolving the appropriate +// overload as CEL can only make inferences by type-name regarding such types. +func Function(name string, opts ...FunctionOpt) EnvOption { + return func(e *Env) (*Env, error) { + fn, err := decls.NewFunction(name, opts...) + if err != nil { + return nil, err + } + if existing, found := e.functions[fn.Name()]; found { + fn, err = existing.Merge(fn) + if err != nil { + return nil, err + } + } + e.functions[fn.Name()] = fn + return e, nil + } +} + +// FunctionOpt defines a functional option for configuring a function declaration. +type FunctionOpt = decls.FunctionOpt + +// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { + return decls.SingletonUnaryBinding(fn, traits...) +} + +// SingletonBinaryImpl creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +// +// Deprecated: use SingletonBinaryBinding +func SingletonBinaryImpl(fn functions.BinaryOp, traits ...int) FunctionOpt { + return decls.SingletonBinaryBinding(fn, traits...) +} + +// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { + return decls.SingletonBinaryBinding(fn, traits...) +} + +// SingletonFunctionImpl creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. 
+//
+// Deprecated: use SingletonFunctionBinding
+func SingletonFunctionImpl(fn functions.FunctionOp, traits ...int) FunctionOpt {
+	return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads.
+//
+// Note, this approach works well if operand is expected to have a specific trait which it implements,
+// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings.
+func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt {
+	return decls.SingletonFunctionBinding(fn, traits...)
+}
+
+// DisableDeclaration disables the function signatures, effectively removing them from the type-check
+// environment while preserving the runtime bindings.
+func DisableDeclaration(value bool) FunctionOpt {
+	return decls.DisableDeclaration(value)
+}
+
+// Overload defines a new global overload with an overload id, argument types, and result type. Through the
+// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to
+// be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func Overload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+	return decls.Overload(overloadID, args, resultType, opts...)
+}
+
+// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types,
+// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding,
+// an operand trait, and to be non-strict.
+//
+// Note: function bindings should be commonly configured with Overload instances whereas operand traits and
+// strict-ness should be rare occurrences.
+func MemberOverload(overloadID string, args []*Type, resultType *Type, opts ...OverloadOpt) FunctionOpt {
+	return decls.MemberOverload(overloadID, args, resultType, opts...)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt = decls.OverloadOpt
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+	return decls.UnaryBinding(binding)
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+	return decls.BinaryBinding(binding)
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+	return decls.FunctionBinding(binding)
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
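Putting Function, Overload, and UnaryBinding together before the remaining overload options below, a sketch of a custom function declaration; the function name `double` and its behavior are illustrative:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
)

func main() {
	env, err := cel.NewEnv(
		cel.Function("double",
			cel.Overload("double_int", []*cel.Type{cel.IntType}, cel.IntType,
				cel.UnaryBinding(func(arg ref.Val) ref.Val {
					// The runtime type-guard guarantees arg is an int here.
					return arg.(types.Int) + arg.(types.Int)
				}),
			),
		),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`double(21)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // 42
}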
+func OverloadIsNonStrict() OverloadOpt { + return decls.OverloadIsNonStrict() +} + +// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be +// successfully invoked. +func OverloadOperandTrait(trait int) OverloadOpt { + return decls.OverloadOperandTrait(trait) +} + +// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation. +func TypeToExprType(t *Type) (*exprpb.Type, error) { + return types.TypeToExprType(t) +} + +// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. +func ExprTypeToType(t *exprpb.Type) (*Type, error) { + return types.ExprTypeToType(t) +} + +// ExprDeclToDeclaration converts a protobuf CEL declaration to a CEL-native declaration, either a Variable or Function. +func ExprDeclToDeclaration(d *exprpb.Decl) (EnvOption, error) { + switch d.GetDeclKind().(type) { + case *exprpb.Decl_Function: + overloads := d.GetFunction().GetOverloads() + opts := make([]FunctionOpt, len(overloads)) + for i, o := range overloads { + args := make([]*Type, len(o.GetParams())) + for j, p := range o.GetParams() { + a, err := types.ExprTypeToType(p) + if err != nil { + return nil, err + } + args[j] = a + } + res, err := types.ExprTypeToType(o.GetResultType()) + if err != nil { + return nil, err + } + if o.IsInstanceFunction { + opts[i] = decls.MemberOverload(o.GetOverloadId(), args, res) + } else { + opts[i] = decls.Overload(o.GetOverloadId(), args, res) + } + } + return Function(d.GetName(), opts...), nil + case *exprpb.Decl_Ident: + t, err := types.ExprTypeToType(d.GetIdent().GetType()) + if err != nil { + return nil, err + } + if d.GetIdent().GetValue() == nil { + return Variable(d.GetName(), t), nil + } + val, err := ast.ConstantToVal(d.GetIdent().GetValue()) + if err != nil { + return nil, err + } + return Constant(d.GetName(), t, val), nil + default: + return nil, fmt.Errorf("unsupported decl: %v", d) + } +} diff --git a/vendor/github.com/google/cel-go/cel/env.go b/vendor/github.com/google/cel-go/cel/env.go new file mode 100644 index 00000000..a8650c4e --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/env.go @@ -0,0 +1,888 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "errors" + "sync" + + "github.com/google/cel-go/checker" + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common" + celast "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/interpreter" + "github.com/google/cel-go/parser" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// Source interface representing a user-provided expression. 
+type Source = common.Source
+
+// Ast represents the checked or unchecked expression, its source, and related metadata such as
+// source position information.
+type Ast struct {
+	source Source
+	impl   *celast.AST
+}
+
+// NativeRep converts the AST to a Go-native representation.
+func (ast *Ast) NativeRep() *celast.AST {
+	if ast == nil {
+		return nil
+	}
+	return ast.impl
+}
+
+// Expr returns the proto serializable instance of the parsed/checked expression.
+//
+// Deprecated: prefer cel.AstToCheckedExpr() or cel.AstToParsedExpr() and call GetExpr()
+// on the result instead.
+func (ast *Ast) Expr() *exprpb.Expr {
+	if ast == nil {
+		return nil
+	}
+	pbExpr, _ := celast.ExprToProto(ast.NativeRep().Expr())
+	return pbExpr
+}
+
+// IsChecked returns whether the Ast value has been successfully type-checked.
+func (ast *Ast) IsChecked() bool {
+	return ast.NativeRep().IsChecked()
+}
+
+// SourceInfo returns character offset and newline position information about expression elements.
+func (ast *Ast) SourceInfo() *exprpb.SourceInfo {
+	if ast == nil {
+		return nil
+	}
+	pbInfo, _ := celast.SourceInfoToProto(ast.NativeRep().SourceInfo())
+	return pbInfo
+}
+
+// ResultType returns the output type of the expression if the Ast has been type-checked, else
+// returns chkdecls.Dyn as the parse step cannot infer the type.
+//
+// Deprecated: use OutputType
+func (ast *Ast) ResultType() *exprpb.Type {
+	out := ast.OutputType()
+	t, err := TypeToExprType(out)
+	if err != nil {
+		return chkdecls.Dyn
+	}
+	return t
+}
+
+// OutputType returns the output type of the expression if the Ast has been type-checked, else
+// returns cel.DynType as the parse step cannot infer types.
+func (ast *Ast) OutputType() *Type {
+	if ast == nil {
+		return types.ErrorType
+	}
+	return ast.NativeRep().GetType(ast.NativeRep().Expr().ID())
+}
+
+// Source returns a view of the input used to create the Ast. This source may be complete or
+// constructed from the SourceInfo.
+func (ast *Ast) Source() Source {
+	if ast == nil {
+		return nil
+	}
+	return ast.source
+}
+
+// FormatType converts a type message into a string representation.
+//
+// Deprecated: prefer FormatCELType
+func FormatType(t *exprpb.Type) string {
+	return checker.FormatCheckedType(t)
+}
+
+// FormatCELType formats a cel.Type value to a string representation.
+//
+// The type formatting is identical to FormatType.
+func FormatCELType(t *Type) string {
+	return checker.FormatCELType(t)
+}
+
+// Env encapsulates the context necessary to perform parsing, type checking, or generation of
+// evaluable programs for different expressions.
+type Env struct {
+	Container       *containers.Container
+	variables       []*decls.VariableDecl
+	functions       map[string]*decls.FunctionDecl
+	macros          []parser.Macro
+	adapter         types.Adapter
+	provider        types.Provider
+	features        map[int]bool
+	appliedFeatures map[int]bool
+	libraries       map[string]bool
+	validators      []ASTValidator
+	costOptions     []checker.CostOption
+
+	// Internal parser representation
+	prsr     *parser.Parser
+	prsrOpts []parser.Option
+
+	// Internal checker representation
+	chkMutex sync.Mutex
+	chk      *checker.Env
+	chkErr   error
+	chkOnce  sync.Once
+	chkOpts  []checker.Option
+
+	// Program options tied to the environment
+	progOpts []ProgramOption
+}
+
+// NewEnv creates a program environment configured with the standard library of CEL functions and
+// macros. The Env value returned can parse and check any CEL program which builds upon the core
+// features documented in the CEL specification.
+// +// See the EnvOption helper functions for the options that can be used to configure the +// environment. +func NewEnv(opts ...EnvOption) (*Env, error) { + // Extend the statically configured standard environment, disabling eager validation to ensure + // the cost of setup for the environment is still just as cheap as it is in v0.11.x and earlier + // releases. The user provided options can easily re-enable the eager validation as they are + // processed after this default option. + stdOpts := append([]EnvOption{EagerlyValidateDeclarations(false)}, opts...) + env, err := getStdEnv() + if err != nil { + return nil, err + } + return env.Extend(stdOpts...) +} + +// NewCustomEnv creates a custom program environment which is not automatically configured with the +// standard library of functions and macros documented in the CEL spec. +// +// The purpose for using a custom environment might be for subsetting the standard library produced +// by the cel.StdLib() function. Subsetting CEL is a core aspect of its design that allows users to +// limit the compute and memory impact of a CEL program by controlling the functions and macros +// that may appear in a given expression. +// +// See the EnvOption helper functions for the options that can be used to configure the +// environment. +func NewCustomEnv(opts ...EnvOption) (*Env, error) { + registry, err := types.NewRegistry() + if err != nil { + return nil, err + } + return (&Env{ + variables: []*decls.VariableDecl{}, + functions: map[string]*decls.FunctionDecl{}, + macros: []parser.Macro{}, + Container: containers.DefaultContainer, + adapter: registry, + provider: registry, + features: map[int]bool{}, + appliedFeatures: map[int]bool{}, + libraries: map[string]bool{}, + validators: []ASTValidator{}, + progOpts: []ProgramOption{}, + costOptions: []checker.CostOption{}, + }).configure(opts) +} + +// Check performs type-checking on the input Ast and yields a checked Ast and/or set of Issues. +// If any `ASTValidators` are configured on the environment, they will be applied after a valid +// type-check result. If any issues are detected, the validators will provide them on the +// output Issues object. +// +// Either checking or validation has failed if the returned Issues value and its Issues.Err() +// value are non-nil. Issues should be inspected if they are non-nil, but may not represent a +// fatal error. +// +// It is possible to have both non-nil Ast and Issues values returned from this call: however, +// the mere presence of an Ast does not imply that it is valid for use. +func (e *Env) Check(ast *Ast) (*Ast, *Issues) { + // Construct the internal checker env, erroring if there is an issue adding the declarations. + chk, err := e.initChecker() + if err != nil { + errs := common.NewErrors(ast.Source()) + errs.ReportError(common.NoLocation, err.Error()) + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) + } + + checked, errs := checker.Check(ast.NativeRep(), ast.Source(), chk) + if len(errs.GetErrors()) > 0 { + return nil, NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo()) + } + // Manually create the Ast to ensure that the Ast source information (which may be more + // detailed than the information provided by Check), is returned to the caller. + ast = &Ast{ + source: ast.Source(), + impl: checked} + + // Avoid creating a validator config if it's not needed. + if len(e.validators) == 0 { + return ast, nil + } + + // Generate a validator configuration from the set of configured validators. 
+	vConfig := newValidatorConfig()
+	for _, v := range e.validators {
+		if cv, ok := v.(ASTValidatorConfigurer); ok {
+			cv.Configure(vConfig)
+		}
+	}
+	// Apply additional validators on the type-checked result.
+	iss := NewIssuesWithSourceInfo(errs, ast.NativeRep().SourceInfo())
+	for _, v := range e.validators {
+		v.Validate(e, vConfig, checked, iss)
+	}
+	if iss.Err() != nil {
+		return nil, iss
+	}
+	return ast, nil
+}
+
+// Compile combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing, the Compile step will not continue with the Check
+// phase. If non-error issues are encountered during Parse, they may be combined with any issues
+// discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) Compile(txt string) (*Ast, *Issues) {
+	return e.CompileSource(common.NewTextSource(txt))
+}
+
+// CompileSource combines the Parse and Check phases of CEL program compilation to produce an Ast and
+// associated issues.
+//
+// If an error is encountered during parsing, the CompileSource step will not continue with the
+// Check phase. If non-error issues are encountered during Parse, they may be combined with any
+// issues discovered during Check.
+//
+// Note, for parse-only uses of CEL use Parse.
+func (e *Env) CompileSource(src Source) (*Ast, *Issues) {
+	ast, iss := e.ParseSource(src)
+	if iss.Err() != nil {
+		return nil, iss
+	}
+	checked, iss2 := e.Check(ast)
+	if iss2.Err() != nil {
+		return nil, iss2
+	}
+	return checked, iss2
+}
+
+// Extend the current environment with additional options to produce a new Env.
+//
+// Note, the extended Env value should not share memory with the original. It is possible, however,
+// that CustomTypeAdapter or CustomTypeProvider options could provide values which are mutable.
+// To ensure separation of state between extended environments either make sure the TypeAdapter and
+// TypeProvider are immutable, or that their underlying implementations are based on the
+// ref.TypeRegistry which provides a Copy method which will be invoked by this method.
+func (e *Env) Extend(opts ...EnvOption) (*Env, error) {
+	chk, chkErr := e.getCheckerOrError()
+	if chkErr != nil {
+		return nil, chkErr
+	}
+
+	prsrOptsCopy := make([]parser.Option, len(e.prsrOpts))
+	copy(prsrOptsCopy, e.prsrOpts)
+
+	// The type-checker is configured with Declarations. The declarations may either be provided
+	// as options which have not yet been validated, or may come from a previous checker instance
+	// whose types have already been validated.
+	chkOptsCopy := make([]checker.Option, len(e.chkOpts))
+	copy(chkOptsCopy, e.chkOpts)
+
+	// Copy the declarations if needed.
+	if chk != nil {
+		// If the type-checker has already been instantiated, then the e.declarations have been
+		// validated within the chk instance.
+		chkOptsCopy = append(chkOptsCopy, checker.ValidatedDeclarations(chk))
+	}
+	varsCopy := make([]*decls.VariableDecl, len(e.variables))
+	copy(varsCopy, e.variables)
+
+	// Copy macros and program options
+	macsCopy := make([]parser.Macro, len(e.macros))
+	progOptsCopy := make([]ProgramOption, len(e.progOpts))
+	copy(macsCopy, e.macros)
+	copy(progOptsCopy, e.progOpts)
+
+	// Copy the adapter / provider if they appear to be mutable.
+	adapter := e.adapter
+	provider := e.provider
+	adapterReg, isAdapterReg := e.adapter.(*types.Registry)
+	providerReg, isProviderReg := e.provider.(*types.Registry)
+	// In most cases the provider and adapter will be a ref.TypeRegistry;
+	// however, in the rare cases where they are not, they are assumed to
+	// be immutable. Since it is possible to set the TypeProvider separately
+	// from the TypeAdapter, the possible configurations which could use a
+	// TypeRegistry as the base implementation are captured below.
+	if isAdapterReg && isProviderReg {
+		reg := providerReg.Copy()
+		provider = reg
+		// If the adapter and provider are the same object, set the adapter
+		// to the same ref.TypeRegistry as the provider.
+		if adapterReg == providerReg {
+			adapter = reg
+		} else {
+			// Otherwise, make a copy of the adapter.
+			adapter = adapterReg.Copy()
+		}
+	} else if isProviderReg {
+		provider = providerReg.Copy()
+	} else if isAdapterReg {
+		adapter = adapterReg.Copy()
+	}
+
+	featuresCopy := make(map[int]bool, len(e.features))
+	for k, v := range e.features {
+		featuresCopy[k] = v
+	}
+	appliedFeaturesCopy := make(map[int]bool, len(e.appliedFeatures))
+	for k, v := range e.appliedFeatures {
+		appliedFeaturesCopy[k] = v
+	}
+	funcsCopy := make(map[string]*decls.FunctionDecl, len(e.functions))
+	for k, v := range e.functions {
+		funcsCopy[k] = v
+	}
+	libsCopy := make(map[string]bool, len(e.libraries))
+	for k, v := range e.libraries {
+		libsCopy[k] = v
+	}
+	validatorsCopy := make([]ASTValidator, len(e.validators))
+	copy(validatorsCopy, e.validators)
+	costOptsCopy := make([]checker.CostOption, len(e.costOptions))
+	copy(costOptsCopy, e.costOptions)
+
+	ext := &Env{
+		Container:       e.Container,
+		variables:       varsCopy,
+		functions:       funcsCopy,
+		macros:          macsCopy,
+		progOpts:        progOptsCopy,
+		adapter:         adapter,
+		features:        featuresCopy,
+		appliedFeatures: appliedFeaturesCopy,
+		libraries:       libsCopy,
+		validators:      validatorsCopy,
+		provider:        provider,
+		chkOpts:         chkOptsCopy,
+		prsrOpts:        prsrOptsCopy,
+		costOptions:     costOptsCopy,
+	}
+	return ext.configure(opts)
+}
+
+// HasFeature checks whether the environment enables the given feature
+// flag, as enumerated in options.go.
+func (e *Env) HasFeature(flag int) bool {
+	enabled, has := e.features[flag]
+	return has && enabled
+}
+
+// HasLibrary returns whether a specific SingletonLibrary has been configured in the environment.
+func (e *Env) HasLibrary(libName string) bool {
+	configured, exists := e.libraries[libName]
+	return exists && configured
+}
+
+// Libraries returns a list of SingletonLibrary that have been configured in the environment.
+func (e *Env) Libraries() []string {
+	libraries := make([]string, 0, len(e.libraries))
+	for libName := range e.libraries {
+		libraries = append(libraries, libName)
+	}
+	return libraries
+}
+
+// HasFunction returns whether a specific function has been configured in the environment.
+func (e *Env) HasFunction(functionName string) bool {
+	_, ok := e.functions[functionName]
+	return ok
+}
+
+// Functions returns a map of Functions, keyed by function name, that have been configured in the environment.
+func (e *Env) Functions() map[string]*decls.FunctionDecl {
+	return e.functions
+}
+
+// HasValidator returns whether a specific ASTValidator has been configured in the environment.
+func (e *Env) HasValidator(name string) bool {
+	for _, v := range e.validators {
+		if v.Name() == name {
+			return true
+		}
+	}
+	return false
+}
+
+// Parse parses the input expression value `txt` to an Ast and/or a set of Issues.
+// +// This form of Parse creates a Source value for the input `txt` and forwards to the +// ParseSource method. +func (e *Env) Parse(txt string) (*Ast, *Issues) { + src := common.NewTextSource(txt) + return e.ParseSource(src) +} + +// ParseSource parses the input source to an Ast and/or set of Issues. +// +// Parsing has failed if the returned Issues value and its Issues.Err() value is non-nil. +// Issues should be inspected if they are non-nil, but may not represent a fatal error. +// +// It is possible to have both non-nil Ast and Issues values returned from this call; however, +// the mere presence of an Ast does not imply that it is valid for use. +func (e *Env) ParseSource(src Source) (*Ast, *Issues) { + parsed, errs := e.prsr.Parse(src) + if len(errs.GetErrors()) > 0 { + return nil, &Issues{errs: errs} + } + return &Ast{source: src, impl: parsed}, nil +} + +// Program generates an evaluable instance of the Ast within the environment (Env). +func (e *Env) Program(ast *Ast, opts ...ProgramOption) (Program, error) { + optSet := e.progOpts + if len(opts) != 0 { + mergedOpts := []ProgramOption{} + mergedOpts = append(mergedOpts, e.progOpts...) + mergedOpts = append(mergedOpts, opts...) + optSet = mergedOpts + } + return newProgram(e, ast, optSet) +} + +// CELTypeAdapter returns the `types.Adapter` configured for the environment. +func (e *Env) CELTypeAdapter() types.Adapter { + return e.adapter +} + +// CELTypeProvider returns the `types.Provider` configured for the environment. +func (e *Env) CELTypeProvider() types.Provider { + return e.provider +} + +// TypeAdapter returns the `ref.TypeAdapter` configured for the environment. +// +// Deprecated: use CELTypeAdapter() +func (e *Env) TypeAdapter() ref.TypeAdapter { + return e.adapter +} + +// TypeProvider returns the `ref.TypeProvider` configured for the environment. +// +// Deprecated: use CELTypeProvider() +func (e *Env) TypeProvider() ref.TypeProvider { + if legacyProvider, ok := e.provider.(ref.TypeProvider); ok { + return legacyProvider + } + return &interopLegacyTypeProvider{Provider: e.provider} +} + +// UnknownVars returns an interpreter.PartialActivation which marks all variables declared in the +// Env as unknown AttributePattern values. +// +// Note, the UnknownVars will behave the same as an interpreter.EmptyActivation unless the +// PartialAttributes option is provided as a ProgramOption. +func (e *Env) UnknownVars() interpreter.PartialActivation { + act := interpreter.EmptyActivation() + part, _ := PartialVars(act, e.computeUnknownVars(act)...) + return part +} + +// PartialVars returns an interpreter.PartialActivation where all variables not in the input variable +// set, but which have been configured in the environment, are marked as unknown. +// +// The `vars` value may either be an interpreter.Activation or any valid input to the +// interpreter.NewActivation call. +// +// Note, this is equivalent to calling cel.PartialVars and manually configuring the set of unknown +// variables. For more advanced use cases of partial state where portions of an object graph, rather +// than top-level variables, are missing the PartialVars() method may be a more suitable choice. +// +// Note, the PartialVars will behave the same as an interpreter.EmptyActivation unless the +// PartialAttributes option is provided as a ProgramOption. 
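A sketch of the partial-evaluation flow these helpers enable, together with ResidualAst (documented just below); the variable names are illustrative, and the program must opt in via EvalOptions:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.IntType),
		cel.Variable("y", cel.IntType),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`x < 10 && y < 10`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Partial evaluation requires state tracking plus the partial-eval option.
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptTrackState, cel.OptPartialEval))
	if err != nil {
		panic(err)
	}
	// Bind x and mark y as an unknown attribute.
	vars, err := cel.PartialVars(map[string]any{"x": 3}, cel.AttributePattern("y"))
	if err != nil {
		panic(err)
	}
	_, details, err := prg.Eval(vars)
	if err != nil {
		panic(err)
	}
	// Prune the evaluated parts, leaving only what depends on y.
	residual, err := env.ResidualAst(ast, details)
	if err != nil {
		panic(err)
	}
	src, _ := cel.AstToString(residual)
	fmt.Println(src) // y < 10
}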
+func (e *Env) PartialVars(vars any) (interpreter.PartialActivation, error) {
+	act, err := interpreter.NewActivation(vars)
+	if err != nil {
+		return nil, err
+	}
+	return PartialVars(act, e.computeUnknownVars(act)...)
+}
+
+// ResidualAst takes an Ast and its EvalDetails to produce a new Ast which only contains the
+// attribute references which are unknown.
+//
+// Residual expressions are beneficial in a few scenarios:
+//
+// - Optimizing constant expression evaluations away.
+// - Indexing and pruning expressions based on known input arguments.
+// - Surfacing additional requirements that are needed in order to complete an evaluation.
+// - Sharing the evaluation of an expression across multiple machines/nodes.
+//
+// For example, if an expression targets a 'resource' and 'request' attribute and the possible
+// values for the resource are known, a PartialActivation could mark the 'request' as an unknown
+// interpreter.AttributePattern and the resulting ResidualAst would be reduced to only the parts
+// of the expression that reference the 'request'.
+//
+// Note, the expression ids within the residual AST generated through this method have no
+// correlation to the expression ids of the original AST.
+//
+// See the PartialVars helper for how to construct a PartialActivation.
+//
+// TODO: Consider adding an option to generate a Program.Residual to avoid round-tripping to an
+// Ast format and then Program again.
+func (e *Env) ResidualAst(a *Ast, details *EvalDetails) (*Ast, error) {
+	pruned := interpreter.PruneAst(a.impl.Expr(), a.impl.SourceInfo().MacroCalls(), details.State())
+	newAST := &Ast{source: a.Source(), impl: pruned}
+	expr, err := AstToString(newAST)
+	if err != nil {
+		return nil, err
+	}
+	parsed, iss := e.Parse(expr)
+	if iss != nil && iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	if !a.IsChecked() {
+		return parsed, nil
+	}
+	checked, iss := e.Check(parsed)
+	if iss != nil && iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	return checked, nil
+}
+
+// EstimateCost estimates the cost of a type checked CEL expression using the length estimates of input data and
+// extension functions provided by estimator.
+func (e *Env) EstimateCost(ast *Ast, estimator checker.CostEstimator, opts ...checker.CostOption) (checker.CostEstimate, error) {
+	extendedOpts := make([]checker.CostOption, 0, len(e.costOptions))
+	extendedOpts = append(extendedOpts, opts...)
+	extendedOpts = append(extendedOpts, e.costOptions...)
+	return checker.Cost(ast.impl, estimator, extendedOpts...)
+}
+
+// configure applies a series of EnvOptions to the current environment.
+func (e *Env) configure(opts []EnvOption) (*Env, error) {
+	// Customize the environment using the provided EnvOption values. If an error is
+	// generated at any step, this will be returned as a nil Env with a non-nil error.
+	var err error
+	for _, opt := range opts {
+		e, err = opt(e)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// If the default UTC timezone fix has been enabled, make sure the library is configured
+	e, err = e.maybeApplyFeature(featureDefaultUTCTimeZone, Lib(timeUTCLibrary{}))
+	if err != nil {
+		return nil, err
+	}
+
+	// Configure the parser.
+	prsrOpts := []parser.Option{}
+	prsrOpts = append(prsrOpts, e.prsrOpts...)
+ prsrOpts = append(prsrOpts, parser.Macros(e.macros...)) + + if e.HasFeature(featureEnableMacroCallTracking) { + prsrOpts = append(prsrOpts, parser.PopulateMacroCalls(true)) + } + if e.HasFeature(featureVariadicLogicalASTs) { + prsrOpts = append(prsrOpts, parser.EnableVariadicOperatorASTs(true)) + } + e.prsr, err = parser.NewParser(prsrOpts...) + if err != nil { + return nil, err + } + + // Ensure that the checker init happens eagerly rather than lazily. + if e.HasFeature(featureEagerlyValidateDeclarations) { + _, err := e.initChecker() + if err != nil { + return nil, err + } + } + + return e, nil +} + +func (e *Env) initChecker() (*checker.Env, error) { + e.chkOnce.Do(func() { + chkOpts := []checker.Option{} + chkOpts = append(chkOpts, e.chkOpts...) + chkOpts = append(chkOpts, + checker.CrossTypeNumericComparisons( + e.HasFeature(featureCrossTypeNumericComparisons))) + + ce, err := checker.NewEnv(e.Container, e.provider, chkOpts...) + if err != nil { + e.setCheckerOrError(nil, err) + return + } + // Add the statically configured declarations. + err = ce.AddIdents(e.variables...) + if err != nil { + e.setCheckerOrError(nil, err) + return + } + // Add the function declarations which are derived from the FunctionDecl instances. + for _, fn := range e.functions { + if fn.IsDeclarationDisabled() { + continue + } + err = ce.AddFunctions(fn) + if err != nil { + e.setCheckerOrError(nil, err) + return + } + } + // Add function declarations here separately. + e.setCheckerOrError(ce, nil) + }) + return e.getCheckerOrError() +} + +// setCheckerOrError sets the checker.Env or error state in a concurrency-safe manner +func (e *Env) setCheckerOrError(chk *checker.Env, chkErr error) { + e.chkMutex.Lock() + e.chk = chk + e.chkErr = chkErr + e.chkMutex.Unlock() +} + +// getCheckerOrError gets the checker.Env or error state in a concurrency-safe manner +func (e *Env) getCheckerOrError() (*checker.Env, error) { + e.chkMutex.Lock() + defer e.chkMutex.Unlock() + return e.chk, e.chkErr +} + +// maybeApplyFeature determines whether the feature-guarded option is enabled, and if so applies +// the feature if it has not already been enabled. +func (e *Env) maybeApplyFeature(feature int, option EnvOption) (*Env, error) { + if !e.HasFeature(feature) { + return e, nil + } + _, applied := e.appliedFeatures[feature] + if applied { + return e, nil + } + e, err := option(e) + if err != nil { + return nil, err + } + // record that the feature has been applied since it will generate declarations + // and functions which will be propagated on Extend() calls and which should only + // be registered once. + e.appliedFeatures[feature] = true + return e, nil +} + +// computeUnknownVars determines a set of missing variables based on the input activation and the +// environment's configured declaration set. +func (e *Env) computeUnknownVars(vars interpreter.Activation) []*interpreter.AttributePattern { + var unknownPatterns []*interpreter.AttributePattern + for _, v := range e.variables { + varName := v.Name() + if _, found := vars.ResolveName(varName); found { + continue + } + unknownPatterns = append(unknownPatterns, interpreter.NewAttributePattern(varName)) + } + return unknownPatterns +} + +// Error type which references an expression id, a location within source, and a message. +type Error = common.Error + +// Issues defines methods for inspecting the error details of parse and check calls. +// +// Note: in the future, non-fatal warnings and notices may be inspectable via the Issues struct. 
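In practice, the Issues type declared below is what callers inspect after Parse, Check, or Compile. A sketch, with an expression chosen to fail type-checking:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	// int + string has no matching overload, so checking reports an issue.
	_, iss := env.Compile(`1 + "one"`)
	if iss.Err() != nil {
		fmt.Println(iss.Err()) // aggregated, display-ready error
	}
	for _, e := range iss.Errors() {
		fmt.Println(e.Message) // per-issue detail
	}
}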
+type Issues struct {
+	errs *common.Errors
+	info *celast.SourceInfo
+}
+
+// NewIssues returns an Issues struct from a common.Errors object.
+func NewIssues(errs *common.Errors) *Issues {
+	return NewIssuesWithSourceInfo(errs, nil)
+}
+
+// NewIssuesWithSourceInfo returns an Issues struct from a common.Errors object with SourceInfo metadata
+// which can be used with the `ReportErrorAtID` method to report additional errors with contextual
+// information inferred from an expression id.
+func NewIssuesWithSourceInfo(errs *common.Errors, info *celast.SourceInfo) *Issues {
+	return &Issues{
+		errs: errs,
+		info: info,
+	}
+}
+
+// Err returns an error value if the issues list contains one or more errors.
+func (i *Issues) Err() error {
+	if i == nil {
+		return nil
+	}
+	if len(i.Errors()) > 0 {
+		return errors.New(i.String())
+	}
+	return nil
+}
+
+// Errors returns the collection of errors encountered in more granular detail.
+func (i *Issues) Errors() []*Error {
+	if i == nil {
+		return []*Error{}
+	}
+	return i.errs.GetErrors()
+}
+
+// Append collects the issues from another Issues struct into a new Issues object.
+func (i *Issues) Append(other *Issues) *Issues {
+	if i == nil {
+		return other
+	}
+	if other == nil || i == other {
+		return i
+	}
+	return NewIssuesWithSourceInfo(i.errs.Append(other.errs.GetErrors()), i.info)
+}
+
+// String converts the issues to a suitable display string.
+func (i *Issues) String() string {
+	if i == nil {
+		return ""
+	}
+	return i.errs.ToDisplayString()
+}
+
+// ReportErrorAtID reports an error message with an optional set of formatting arguments.
+//
+// The source metadata for the expression at `id`, if present, is attached to the error report.
+// To ensure that source metadata is attached to error reports, use NewIssuesWithSourceInfo.
+func (i *Issues) ReportErrorAtID(id int64, message string, args ...any) {
+	i.errs.ReportErrorAtID(id, i.info.GetStartLocation(id), message, args...)
+}
+
+// getStdEnv lazy initializes the CEL standard environment.
+func getStdEnv() (*Env, error) {
+	stdEnvInit.Do(func() {
+		stdEnv, stdEnvErr = NewCustomEnv(StdLib(), EagerlyValidateDeclarations(true))
+	})
+	return stdEnv, stdEnvErr
+}
+
+// interopCELTypeProvider layers support for the types.Provider interface on top of a ref.TypeProvider.
+type interopCELTypeProvider struct {
+	ref.TypeProvider
+}
+
+// FindStructType returns a types.Type instance for the given fully-qualified typeName if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+	if et, found := p.FindType(typeName); found {
+		t, err := types.ExprTypeToType(et)
+		if err != nil {
+			return nil, false
+		}
+		return t, true
+	}
+	return nil, false
+}
+
+// FindStructFieldNames returns an empty set of fields for the interop provider.
+//
+// To inspect the field names, migrate to a `types.Provider` implementation.
+func (p *interopCELTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+	return []string{}, false
+}
+
+// FindStructFieldType returns a types.FieldType instance for the given fully-qualified typeName and field
+// name, if one exists.
+//
+// This method proxies to the underlying ref.TypeProvider's FindFieldType method and converts the protobuf type
+// into a native type representation. If the conversion fails, the type is listed as not found.
+func (p *interopCELTypeProvider) FindStructFieldType(structType, fieldName string) (*types.FieldType, bool) {
+	if ft, found := p.FindFieldType(structType, fieldName); found {
+		t, err := types.ExprTypeToType(ft.Type)
+		if err != nil {
+			return nil, false
+		}
+		return &types.FieldType{
+			Type:    t,
+			IsSet:   ft.IsSet,
+			GetFrom: ft.GetFrom,
+		}, true
+	}
+	return nil, false
+}
+
+// interopLegacyTypeProvider layers support for the ref.TypeProvider interface on top of a types.Provider.
+type interopLegacyTypeProvider struct {
+	types.Provider
+}
+
+// FindType returns the protobuf Type representation for the input type name if one exists.
+//
+// This method proxies to the underlying types.Provider FindStructType method and converts the types.Type
+// value to a protobuf Type representation.
+//
+// Failure to convert the type will result in the type not being found.
+func (p *interopLegacyTypeProvider) FindType(typeName string) (*exprpb.Type, bool) {
+	if t, found := p.FindStructType(typeName); found {
+		et, err := types.TypeToExprType(t)
+		if err != nil {
+			return nil, false
+		}
+		return et, true
+	}
+	return nil, false
+}
+
+// FindFieldType returns the protobuf-based FieldType representation for the input type name and field,
+// if one exists.
+//
+// This call proxies to the types.Provider FindStructFieldType method and converts the types.FieldType
+// value to a protobuf-based ref.FieldType representation if found.
+//
+// Failure to convert the FieldType will result in the field not being found.
+func (p *interopLegacyTypeProvider) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+	if cft, found := p.FindStructFieldType(structType, fieldName); found {
+		et, err := types.TypeToExprType(cft.Type)
+		if err != nil {
+			return nil, false
+		}
+		return &ref.FieldType{
+			Type:    et,
+			IsSet:   cft.IsSet,
+			GetFrom: cft.GetFrom,
+		}, true
+	}
+	return nil, false
+}
+
+var (
+	stdEnvInit sync.Once
+	stdEnv     *Env
+	stdEnvErr  error
+)
diff --git a/vendor/github.com/google/cel-go/cel/folding.go b/vendor/github.com/google/cel-go/cel/folding.go
new file mode 100644
index 00000000..d7060896
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/folding.go
@@ -0,0 +1,559 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/overloads"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// ConstantFoldingOption defines a functional option for configuring constant folding.
+type ConstantFoldingOption func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error)
+
+// MaxConstantFoldIterations limits the number of times literals may be folded during optimization.
+//
+// Defaults to 100 if not set.
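+//
+// A configuration sketch, assuming 'env' and a checked 'ast' already exist:
+//
+//	folder, err := cel.NewConstantFoldingOptimizer(cel.MaxConstantFoldIterations(50))
+//	if err != nil {
+//		// handle the option error
+//	}
+//	opt := cel.NewStaticOptimizer(folder)
+//	optimized, iss := opt.Optimize(env, ast)
+//	if iss.Err() != nil {
+//		// handle the optimization error
+//	}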
+func MaxConstantFoldIterations(limit int) ConstantFoldingOption {
+	return func(opt *constantFoldingOptimizer) (*constantFoldingOptimizer, error) {
+		opt.maxFoldIterations = limit
+		return opt, nil
+	}
+}
+
+// NewConstantFoldingOptimizer creates an optimizer which inlines constant scalar and aggregate
+// literal values within function calls and select statements with their evaluated result.
+func NewConstantFoldingOptimizer(opts ...ConstantFoldingOption) (ASTOptimizer, error) {
+	folder := &constantFoldingOptimizer{
+		maxFoldIterations: defaultMaxConstantFoldIterations,
+	}
+	var err error
+	for _, o := range opts {
+		folder, err = o(folder)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return folder, nil
+}
+
+type constantFoldingOptimizer struct {
+	maxFoldIterations int
+}
+
+// Optimize queries the expression graph for scalar and aggregate literal expressions within call and
+// select statements and then evaluates them and replaces the call site with the literal result.
+//
+// Note: only values which can be represented as literals in CEL syntax are supported.
+func (opt *constantFoldingOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+	root := ast.NavigateAST(a)
+
+	// Walk the list of foldable expressions and continue to fold until there are no more folds left.
+	// All of the fold candidates returned by the constantExprMatcher should succeed unless there's
+	// a logic bug with the selection of expressions.
+	foldableExprs := ast.MatchDescendants(root, constantExprMatcher)
+	foldCount := 0
+	for len(foldableExprs) != 0 && foldCount < opt.maxFoldIterations {
+		for _, fold := range foldableExprs {
+			// If the expression could be folded because it's a non-strict call, and the
+			// branches are pruned, continue to the next fold.
+			if fold.Kind() == ast.CallKind && maybePruneBranches(ctx, fold) {
+				continue
+			}
+			// Otherwise, assume all context is needed to evaluate the expression.
+			err := tryFold(ctx, a, fold)
+			if err != nil {
+				ctx.ReportErrorAtID(fold.ID(), "constant-folding evaluation failed: %v", err.Error())
+				return a
+			}
+		}
+		foldCount++
+		foldableExprs = ast.MatchDescendants(root, constantExprMatcher)
+	}
+	// Once all of the constants have been folded, try to run through the remaining comprehensions
+	// one last time. In this case, there's no guarantee they'll run, so we only update the
+	// target comprehension node with the literal value if the evaluation succeeds.
+	for _, compre := range ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) {
+		tryFold(ctx, a, compre)
+	}
+
+	// If the output is a list, map, or struct which contains optional entries, then prune it
+	// to make sure that the optionals, if resolved, do not surface in the output literal.
+	pruneOptionalElements(ctx, root)
+
+	// Ensure that all intermediate values in the folded expression can be represented as valid
+	// CEL literals within the AST structure. Use `PostOrderVisit` rather than `MatchDescendants`
+	// to avoid extra allocations during this final pass through the AST.
+	ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) {
+		if e.Kind() != ast.LiteralKind {
+			return
+		}
+		val := e.AsLiteral()
+		adapted, err := adaptLiteral(ctx, val)
+		if err != nil {
+			ctx.ReportErrorAtID(root.ID(), "constant-folding evaluation failed: %v", err.Error())
+			return
+		}
+		ctx.UpdateExpr(e, adapted)
+	}))
+
+	return a
+}
+
+// tryFold attempts to evaluate a sub-expression to a literal.
+// +// If the evaluation succeeds, the input expr value will be modified to become a literal, otherwise +// the method will return an error. +func tryFold(ctx *OptimizerContext, a *ast.AST, expr ast.Expr) error { + // Assume all context is needed to evaluate the expression. + subAST := &Ast{ + impl: ast.NewCheckedAST(ast.NewAST(expr, a.SourceInfo()), a.TypeMap(), a.ReferenceMap()), + } + prg, err := ctx.Program(subAST) + if err != nil { + return err + } + out, _, err := prg.Eval(NoVars()) + if err != nil { + return err + } + // Update the fold expression to be a literal. + ctx.UpdateExpr(expr, ctx.NewLiteral(out)) + return nil +} + +// maybePruneBranches inspects the non-strict call expression to determine whether +// a branch can be removed. Evaluation will naturally prune logical and / or calls, +// but conditional will not be pruned cleanly, so this is one small area where the +// constant folding step reimplements a portion of the evaluator. +func maybePruneBranches(ctx *OptimizerContext, expr ast.NavigableExpr) bool { + call := expr.AsCall() + args := call.Args() + switch call.FunctionName() { + case operators.LogicalAnd, operators.LogicalOr: + return maybeShortcircuitLogic(ctx, call.FunctionName(), args, expr) + case operators.Conditional: + cond := args[0] + truthy := args[1] + falsy := args[2] + if cond.Kind() != ast.LiteralKind { + return false + } + if cond.AsLiteral() == types.True { + ctx.UpdateExpr(expr, truthy) + } else { + ctx.UpdateExpr(expr, falsy) + } + return true + case operators.In: + haystack := args[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.False)) + return true + } + needle := args[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + ctx.UpdateExpr(expr, ctx.NewLiteral(types.True)) + return true + } + } + } + } + return false +} + +func maybeShortcircuitLogic(ctx *OptimizerContext, function string, args []ast.Expr, expr ast.NavigableExpr) bool { + shortcircuit := types.False + skip := types.True + if function == operators.LogicalOr { + shortcircuit = types.True + skip = types.False + } + newArgs := []ast.Expr{} + for _, arg := range args { + if arg.Kind() != ast.LiteralKind { + newArgs = append(newArgs, arg) + continue + } + if arg.AsLiteral() == skip { + continue + } + if arg.AsLiteral() == shortcircuit { + ctx.UpdateExpr(expr, arg) + return true + } + } + if len(newArgs) == 0 { + newArgs = append(newArgs, args[0]) + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + if len(newArgs) == 1 { + ctx.UpdateExpr(expr, newArgs[0]) + return true + } + ctx.UpdateExpr(expr, ctx.NewCall(function, newArgs...)) + return true +} + +// pruneOptionalElements works from the bottom up to resolve optional elements within +// aggregate literals. +// +// Note, many aggregate literals will be resolved as arguments to functions or select +// statements, so this method exists to handle the case where the literal could not be +// fully resolved or exists outside of a call, select, or comprehension context. 
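+//
+// For example, a list literal with resolved optional entries is pruned as follows:
+//
+//	[1, ?optional.of(2), ?optional.none()] // folds to [1, 2]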
+func pruneOptionalElements(ctx *OptimizerContext, root ast.NavigableExpr) { + aggregateLiterals := ast.MatchDescendants(root, aggregateLiteralMatcher) + for _, lit := range aggregateLiterals { + switch lit.Kind() { + case ast.ListKind: + pruneOptionalListElements(ctx, lit) + case ast.MapKind: + pruneOptionalMapEntries(ctx, lit) + case ast.StructKind: + pruneOptionalStructFields(ctx, lit) + } + } +} + +func pruneOptionalListElements(ctx *OptimizerContext, e ast.Expr) { + l := e.AsList() + elems := l.Elements() + optIndices := l.OptionalIndices() + if len(optIndices) == 0 { + return + } + updatedElems := []ast.Expr{} + updatedIndices := []int32{} + newOptIndex := -1 + for _, e := range elems { + newOptIndex++ + if !l.IsOptional(int32(newOptIndex)) { + updatedElems = append(updatedElems, e) + continue + } + if e.Kind() != ast.LiteralKind { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + optElemVal, ok := e.AsLiteral().(*types.Optional) + if !ok { + updatedElems = append(updatedElems, e) + updatedIndices = append(updatedIndices, int32(newOptIndex)) + continue + } + if !optElemVal.HasValue() { + newOptIndex-- // Skipping causes the list to get smaller. + continue + } + ctx.UpdateExpr(e, ctx.NewLiteral(optElemVal.GetValue())) + updatedElems = append(updatedElems, e) + } + ctx.UpdateExpr(e, ctx.NewList(updatedElems, updatedIndices)) +} + +func pruneOptionalMapEntries(ctx *OptimizerContext, e ast.Expr) { + m := e.AsMap() + entries := m.Entries() + updatedEntries := []ast.EntryExpr{} + modified := false + for _, e := range entries { + entry := e.AsMapEntry() + key := entry.Key() + val := entry.Value() + // If the entry is not optional, or the value-side of the optional hasn't + // been resolved to a literal, then preserve the entry as-is. + if !entry.IsOptional() || val.Kind() != ast.LiteralKind { + updatedEntries = append(updatedEntries, e) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedEntries = append(updatedEntries, e) + continue + } + // When the key is not a literal, but the value is, then it needs to be + // restored to an optional value. 
+ if key.Kind() != ast.LiteralKind { + undoOptVal, err := adaptLiteral(ctx, optElemVal) + if err != nil { + ctx.ReportErrorAtID(val.ID(), "invalid map value literal %v: %v", optElemVal, err) + } + ctx.UpdateExpr(val, undoOptVal) + updatedEntries = append(updatedEntries, e) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedEntry := ctx.NewMapEntry(key, val, false) + updatedEntries = append(updatedEntries, updatedEntry) + } + if modified { + ctx.UpdateExpr(e, ctx.NewMap(updatedEntries)) + } +} + +func pruneOptionalStructFields(ctx *OptimizerContext, e ast.Expr) { + s := e.AsStruct() + fields := s.Fields() + updatedFields := []ast.EntryExpr{} + modified := false + for _, f := range fields { + field := f.AsStructField() + val := field.Value() + if !field.IsOptional() || val.Kind() != ast.LiteralKind { + updatedFields = append(updatedFields, f) + continue + } + optElemVal, ok := val.AsLiteral().(*types.Optional) + if !ok { + updatedFields = append(updatedFields, f) + continue + } + modified = true + if !optElemVal.HasValue() { + continue + } + ctx.UpdateExpr(val, ctx.NewLiteral(optElemVal.GetValue())) + updatedField := ctx.NewStructField(field.Name(), val, false) + updatedFields = append(updatedFields, updatedField) + } + if modified { + ctx.UpdateExpr(e, ctx.NewStruct(s.TypeName(), updatedFields)) + } +} + +// adaptLiteral converts a runtime CEL value to its equivalent literal expression. +// +// For strongly typed values, the type-provider will be used to reconstruct the fields +// which are present in the literal and their equivalent initialization values. +func adaptLiteral(ctx *OptimizerContext, val ref.Val) (ast.Expr, error) { + switch t := val.Type().(type) { + case *types.Type: + switch t { + case types.BoolType, types.BytesType, types.DoubleType, types.IntType, + types.NullType, types.StringType, types.UintType: + return ctx.NewLiteral(val), nil + case types.DurationType: + return ctx.NewCall( + overloads.TypeConvertDuration, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.TimestampType: + return ctx.NewCall( + overloads.TypeConvertTimestamp, + ctx.NewLiteral(val.ConvertToType(types.StringType)), + ), nil + case types.OptionalType: + opt := val.(*types.Optional) + if !opt.HasValue() { + return ctx.NewCall("optional.none"), nil + } + target, err := adaptLiteral(ctx, opt.GetValue()) + if err != nil { + return nil, err + } + return ctx.NewCall("optional.of", target), nil + case types.TypeType: + return ctx.NewIdent(val.(*types.Type).TypeName()), nil + case types.ListType: + l, ok := val.(traits.Lister) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + elems := make([]ast.Expr, l.Size().(types.Int)) + idx := 0 + it := l.Iterator() + for it.HasNext() == types.True { + elemVal := it.Next() + elemExpr, err := adaptLiteral(ctx, elemVal) + if err != nil { + return nil, err + } + elems[idx] = elemExpr + idx++ + } + return ctx.NewList(elems, []int32{}), nil + case types.MapType: + m, ok := val.(traits.Mapper) + if !ok { + return nil, fmt.Errorf("failed to adapt %v to literal", val) + } + entries := make([]ast.EntryExpr, m.Size().(types.Int)) + idx := 0 + it := m.Iterator() + for it.HasNext() == types.True { + keyVal := it.Next() + keyExpr, err := adaptLiteral(ctx, keyVal) + if err != nil { + return nil, err + } + valVal := m.Get(keyVal) + valExpr, err := adaptLiteral(ctx, valVal) + if err != nil { + return nil, err + } + entries[idx] = 
ctx.NewMapEntry(keyExpr, valExpr, false)
+				idx++
+			}
+			return ctx.NewMap(entries), nil
+		default:
+			provider := ctx.CELTypeProvider()
+			fields, found := provider.FindStructFieldNames(t.TypeName())
+			if !found {
+				return nil, fmt.Errorf("failed to adapt %v to literal", val)
+			}
+			tester := val.(traits.FieldTester)
+			indexer := val.(traits.Indexer)
+			fieldInits := []ast.EntryExpr{}
+			for _, f := range fields {
+				field := types.String(f)
+				if tester.IsSet(field) != types.True {
+					continue
+				}
+				fieldVal := indexer.Get(field)
+				fieldExpr, err := adaptLiteral(ctx, fieldVal)
+				if err != nil {
+					return nil, err
+				}
+				fieldInits = append(fieldInits, ctx.NewStructField(f, fieldExpr, false))
+			}
+			return ctx.NewStruct(t.TypeName(), fieldInits), nil
+		}
+	}
+	return nil, fmt.Errorf("failed to adapt %v to literal", val)
+}
+
+// constantExprMatcher matches calls, select statements, and comprehensions whose arguments
+// are all constant scalar or aggregate literal values.
+//
+// Only comprehensions which are not nested are included as possible constant folds, and then
+// only if all variables referenced in the comprehension stack are iteration or accumulation
+// variables.
+func constantExprMatcher(e ast.NavigableExpr) bool {
+	switch e.Kind() {
+	case ast.CallKind:
+		return constantCallMatcher(e)
+	case ast.SelectKind:
+		sel := e.AsSelect() // guaranteed to be a navigable value
+		return constantMatcher(sel.Operand().(ast.NavigableExpr))
+	case ast.ComprehensionKind:
+		if isNestedComprehension(e) {
+			return false
+		}
+		vars := map[string]bool{}
+		constantExprs := true
+		visitor := ast.NewExprVisitor(func(e ast.Expr) {
+			if e.Kind() == ast.ComprehensionKind {
+				nested := e.AsComprehension()
+				vars[nested.AccuVar()] = true
+				vars[nested.IterVar()] = true
+			}
+			if e.Kind() == ast.IdentKind && !vars[e.AsIdent()] {
+				constantExprs = false
+			}
+		})
+		ast.PreOrderVisit(e, visitor)
+		return constantExprs
+	default:
+		return false
+	}
+}
+
+// constantCallMatcher identifies strict and non-strict calls which can be folded.
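+//
+// A few illustrative matches ('x' stands in for any non-constant sub-expression):
+//
+//	1 + 2          // strict call with all-constant arguments
+//	true || x      // non-strict: a literal branch may short-circuit
+//	2 in [1, 2, 3] // constant needle within a constant list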
+func constantCallMatcher(e ast.NavigableExpr) bool { + call := e.AsCall() + children := e.Children() + fnName := call.FunctionName() + if fnName == operators.LogicalAnd { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.LogicalOr { + for _, child := range children { + if child.Kind() == ast.LiteralKind { + return true + } + } + } + if fnName == operators.Conditional { + cond := children[0] + if cond.Kind() == ast.LiteralKind && cond.AsLiteral().Type() == types.BoolType { + return true + } + } + if fnName == operators.In { + haystack := children[1] + if haystack.Kind() == ast.ListKind && haystack.AsList().Size() == 0 { + return true + } + needle := children[0] + if needle.Kind() == ast.LiteralKind && haystack.Kind() == ast.ListKind { + needleValue := needle.AsLiteral() + list := haystack.AsList() + for _, e := range list.Elements() { + if e.Kind() == ast.LiteralKind && e.AsLiteral().Equal(needleValue) == types.True { + return true + } + } + } + } + // convert all other calls with constant arguments + for _, child := range children { + if !constantMatcher(child) { + return false + } + } + return true +} + +func isNestedComprehension(e ast.NavigableExpr) bool { + parent, found := e.Parent() + for found { + if parent.Kind() == ast.ComprehensionKind { + return true + } + parent, found = parent.Parent() + } + return false +} + +func aggregateLiteralMatcher(e ast.NavigableExpr) bool { + return e.Kind() == ast.ListKind || e.Kind() == ast.MapKind || e.Kind() == ast.StructKind +} + +var ( + constantMatcher = ast.ConstantValueMatcher() +) + +const ( + defaultMaxConstantFoldIterations = 100 +) diff --git a/vendor/github.com/google/cel-go/cel/inlining.go b/vendor/github.com/google/cel-go/cel/inlining.go new file mode 100644 index 00000000..78d5bea6 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/inlining.go @@ -0,0 +1,228 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/traits" +) + +// InlineVariable holds a variable name to be matched and an AST representing +// the expression graph which should be used to replace it. +type InlineVariable struct { + name string + alias string + def *ast.AST +} + +// Name returns the qualified variable or field selection to replace. +func (v *InlineVariable) Name() string { + return v.name +} + +// Alias returns the alias to use when performing cel.bind() calls during inlining. +func (v *InlineVariable) Alias() string { + return v.alias +} + +// Expr returns the inlined expression value. +func (v *InlineVariable) Expr() ast.Expr { + return v.def.Expr() +} + +// Type indicates the inlined expression type. 
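+// The type is resolved from the root expression id of the checked inline definition.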
+func (v *InlineVariable) Type() *Type {
+	return v.def.GetType(v.def.Expr().ID())
+}
+
+// NewInlineVariable declares a variable name to be replaced by a checked expression.
+func NewInlineVariable(name string, definition *Ast) *InlineVariable {
+	return NewInlineVariableWithAlias(name, name, definition)
+}
+
+// NewInlineVariableWithAlias declares a variable name to be replaced by a checked expression.
+// If the variable occurs more than once, the provided alias will be used to replace the expressions
+// where the variable name occurs.
+func NewInlineVariableWithAlias(name, alias string, definition *Ast) *InlineVariable {
+	return &InlineVariable{name: name, alias: alias, def: definition.impl}
+}
+
+// NewInliningOptimizer creates an optimizer which replaces variables with expression definitions.
+//
+// If a variable occurs once, the variable is replaced by the inline definition. If the
+// variable occurs more than once, the variable occurrences are replaced by a cel.bind() call.
+func NewInliningOptimizer(inlineVars ...*InlineVariable) ASTOptimizer {
+	return &inliningOptimizer{variables: inlineVars}
+}
+
+type inliningOptimizer struct {
+	variables []*InlineVariable
+}
+
+func (opt *inliningOptimizer) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+	root := ast.NavigateAST(a)
+	for _, inlineVar := range opt.variables {
+		matches := ast.MatchDescendants(root, opt.matchVariable(inlineVar.Name()))
+		// Skip cases where the variable isn't in the expression graph.
+		if len(matches) == 0 {
+			continue
+		}
+
+		// For a single match, do a direct replacement of the expression sub-graph.
+		if len(matches) == 1 || !isBindable(matches, inlineVar.Expr(), inlineVar.Type()) {
+			for _, match := range matches {
+				// Copy the inlined AST expr and source info.
+				copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+				opt.inlineExpr(ctx, match, copyExpr, inlineVar.Type())
+			}
+			continue
+		}
+
+		// For multiple matches, find the least common ancestor (lca) and insert the
+		// variable as a cel.bind() macro.
+		var lca ast.NavigableExpr = root
+		lcaAncestorCount := 0
+		ancestors := map[int64]int{}
+		for _, match := range matches {
+			// Update the identifier matches with the provided alias.
+			parent, found := match, true
+			for found {
+				ancestorCount, hasAncestor := ancestors[parent.ID()]
+				if !hasAncestor {
+					ancestors[parent.ID()] = 1
+					parent, found = parent.Parent()
+					continue
+				}
+				if lcaAncestorCount < ancestorCount || (lcaAncestorCount == ancestorCount && lca.Depth() < parent.Depth()) {
+					lca = parent
+					lcaAncestorCount = ancestorCount
+				}
+				ancestors[parent.ID()] = ancestorCount + 1
+				parent, found = parent.Parent()
+			}
+			aliasExpr := ctx.NewIdent(inlineVar.Alias())
+			opt.inlineExpr(ctx, match, aliasExpr, inlineVar.Type())
+		}
+
+		// Copy the inlined AST expr and source info.
+		copyExpr := ctx.CopyASTAndMetadata(inlineVar.def)
+		// Update the least common ancestor by inserting a cel.bind() call to the alias.
+		inlined, bindMacro := ctx.NewBindMacro(lca.ID(), inlineVar.Alias(), copyExpr, lca)
+		opt.inlineExpr(ctx, lca, inlined, inlineVar.Type())
+		ctx.SetMacroCall(lca.ID(), bindMacro)
+	}
+	return a
+}
+
+// inlineExpr replaces the current expression with the inlined one, unless the location of the inlining
+// happens within a presence test, e.g. has(a.b.c) where 'alpha' is inlined for a.b.c, in which case an
+// attempt is made to determine whether the inlined value can be presence or existence tested.
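+//
+// For instance, inlining a list-typed variable 'alpha' for 'a.b.c' would rewrite:
+//
+//	has(a.b.c)  =>  alpha.size() != 0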
+func (opt *inliningOptimizer) inlineExpr(ctx *OptimizerContext, prev ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) {
+	switch prev.Kind() {
+	case ast.SelectKind:
+		sel := prev.AsSelect()
+		if !sel.IsTestOnly() {
+			ctx.UpdateExpr(prev, inlined)
+			return
+		}
+		opt.rewritePresenceExpr(ctx, prev, inlined, inlinedType)
+	default:
+		ctx.UpdateExpr(prev, inlined)
+	}
+}
+
+// rewritePresenceExpr converts the inlined expression, when it occurs within a has() macro, to a
+// type-safe expression appropriate for the inlined type, if possible.
+//
+// If the rewrite is not possible, an error is reported at the inline expression site.
+func (opt *inliningOptimizer) rewritePresenceExpr(ctx *OptimizerContext, prev, inlined ast.Expr, inlinedType *Type) {
+	// If the input inlined expression is not a select expression it won't work with the has()
+	// macro. Attempt to rewrite the presence test in terms of the typed input, otherwise error.
+	if inlined.Kind() == ast.SelectKind {
+		presenceTest, hasMacro := ctx.NewHasMacro(prev.ID(), inlined)
+		ctx.UpdateExpr(prev, presenceTest)
+		ctx.SetMacroCall(prev.ID(), hasMacro)
+		return
+	}
+
+	ctx.ClearMacroCall(prev.ID())
+	if inlinedType.IsAssignableType(NullType) {
+		ctx.UpdateExpr(prev,
+			ctx.NewCall(operators.NotEquals,
+				inlined,
+				ctx.NewLiteral(types.NullValue),
+			))
+		return
+	}
+	if inlinedType.HasTrait(traits.SizerType) {
+		ctx.UpdateExpr(prev,
+			ctx.NewCall(operators.NotEquals,
+				ctx.NewMemberCall(overloads.Size, inlined),
+				ctx.NewLiteral(types.IntZero),
+			))
+		return
+	}
+	ctx.ReportErrorAtID(prev.ID(), "unable to inline expression type %v into presence test", inlinedType)
+}
+
+// isBindable indicates whether the inlined type can be used within a cel.bind() if the expression
+// being replaced occurs within a presence test. Value types with a size() method or field selection
+// support can be bound.
+//
+// In future iterations, support may also be added for indexer types which can be rewritten as an `in`
+// expression; however, this would imply a rewrite of the inlined expression that may not be necessary
+// in most cases.
+func isBindable(matches []ast.NavigableExpr, inlined ast.Expr, inlinedType *Type) bool {
+	if inlinedType.IsAssignableType(NullType) ||
+		inlinedType.HasTrait(traits.SizerType) {
+		return true
+	}
+	for _, m := range matches {
+		if m.Kind() != ast.SelectKind {
+			continue
+		}
+		sel := m.AsSelect()
+		if sel.IsTestOnly() {
+			return false
+		}
+	}
+	return true
+}
+
+// matchVariable matches simple identifiers, select expressions, and presence test expressions
+// which match the (potentially) qualified variable name provided as input.
+//
+// Note, this function does not support inlining against select expressions which include optional
+// field selection. This may be a future refinement.
+func (opt *inliningOptimizer) matchVariable(varName string) ast.ExprMatcher {
+	return func(e ast.NavigableExpr) bool {
+		if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
+			return true
+		}
+		if e.Kind() == ast.SelectKind {
+			sel := e.AsSelect()
+			// While the `ToQualifiedName` call could take the select directly, this
+			// would skip presence tests from possible matches, which we would like
+			// to include.
+			qualName, found := containers.ToQualifiedName(sel.Operand())
+			return found && qualName+"."+sel.FieldName() == varName
+		}
+		return false
+	}
+}
diff --git a/vendor/github.com/google/cel-go/cel/io.go b/vendor/github.com/google/cel-go/cel/io.go
new file mode 100644
index 00000000..3133fb9d
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/io.go
@@ -0,0 +1,252 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"google.golang.org/protobuf/proto"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+	"github.com/google/cel-go/parser"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// CheckedExprToAst converts a checked expression proto message to an Ast.
+func CheckedExprToAst(checkedExpr *exprpb.CheckedExpr) *Ast {
+	checked, _ := CheckedExprToAstWithSource(checkedExpr, nil)
+	return checked
+}
+
+// CheckedExprToAstWithSource converts a checked expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general the source is not necessary unless the AST has been modified between the
+// `Parse` and `Check` calls as an `Ast` created from the `Parse` step will carry the source
+// through future calls.
+//
+// Prefer CheckedExprToAst if loading expressions from storage.
+func CheckedExprToAstWithSource(checkedExpr *exprpb.CheckedExpr, src Source) (*Ast, error) {
+	checked, err := ast.ToAST(checkedExpr)
+	if err != nil {
+		return nil, err
+	}
+	return &Ast{source: src, impl: checked}, nil
+}
+
+// AstToCheckedExpr converts an Ast to a protobuf CheckedExpr value.
+//
+// If Ast.IsChecked() returns false, this conversion method will return an error.
+func AstToCheckedExpr(a *Ast) (*exprpb.CheckedExpr, error) {
+	if !a.IsChecked() {
+		return nil, fmt.Errorf("cannot convert unchecked ast")
+	}
+	return ast.ToProto(a.impl)
+}
+
+// ParsedExprToAst converts a parsed expression proto message to an Ast.
+func ParsedExprToAst(parsedExpr *exprpb.ParsedExpr) *Ast {
+	return ParsedExprToAstWithSource(parsedExpr, nil)
+}
+
+// ParsedExprToAstWithSource converts a parsed expression proto message to an Ast,
+// using the provided Source as the textual contents.
+//
+// In general you only need this if you need to recheck a previously checked
+// expression, or if you need to separately check a subset of an expression.
+//
+// Prefer ParsedExprToAst if loading expressions from storage.
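+//
+// A round-trip sketch, assuming 'parsedExpr' and its original Source 'src' are
+// available and an Env value named 'env' is in scope:
+//
+//	ast := cel.ParsedExprToAstWithSource(parsedExpr, src)
+//	checked, iss := env.Check(ast)
+//	if iss.Err() != nil {
+//		// handle the type-check failure
+//	}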
+func ParsedExprToAstWithSource(parsedExpr *exprpb.ParsedExpr, src Source) *Ast {
+	info, _ := ast.ProtoToSourceInfo(parsedExpr.GetSourceInfo())
+	if src == nil {
+		src = common.NewInfoSource(parsedExpr.GetSourceInfo())
+	}
+	e, _ := ast.ProtoToExpr(parsedExpr.GetExpr())
+	return &Ast{source: src, impl: ast.NewAST(e, info)}
+}
+
+// AstToParsedExpr converts an Ast to a protobuf ParsedExpr value.
+func AstToParsedExpr(a *Ast) (*exprpb.ParsedExpr, error) {
+	return &exprpb.ParsedExpr{
+		Expr:       a.Expr(),
+		SourceInfo: a.SourceInfo(),
+	}, nil
+}
+
+// AstToString converts an Ast back to a string if possible.
+//
+// Note, the conversion may not be an exact replica of the original expression, but will produce
+// a string that is semantically equivalent and whose textual representation is stable.
+func AstToString(a *Ast) (string, error) {
+	return parser.Unparse(a.impl.Expr(), a.impl.SourceInfo())
+}
+
+// RefValueToValue converts between ref.Val and api.expr.Value.
+// The result Value is the serialized proto form. The ref.Val must not be an error or unknown value.
+func RefValueToValue(res ref.Val) (*exprpb.Value, error) {
+	switch res.Type() {
+	case types.BoolType:
+		return &exprpb.Value{
+			Kind: &exprpb.Value_BoolValue{BoolValue: res.Value().(bool)}}, nil
+	case types.BytesType:
+		return &exprpb.Value{
+			Kind: &exprpb.Value_BytesValue{BytesValue: res.Value().([]byte)}}, nil
+	case types.DoubleType:
+		return &exprpb.Value{
+			Kind: &exprpb.Value_DoubleValue{DoubleValue: res.Value().(float64)}}, nil
+	case types.IntType:
+		return &exprpb.Value{
+			Kind: &exprpb.Value_Int64Value{Int64Value: res.Value().(int64)}}, nil
+	case types.ListType:
+		l := res.(traits.Lister)
+		sz := l.Size().(types.Int)
+		elts := make([]*exprpb.Value, 0, int64(sz))
+		for i := types.Int(0); i < sz; i++ {
+			v, err := RefValueToValue(l.Get(i))
+			if err != nil {
+				return nil, err
+			}
+			elts = append(elts, v)
+		}
+		return &exprpb.Value{
+			Kind: &exprpb.Value_ListValue{
+				ListValue: &exprpb.ListValue{Values: elts}}}, nil
+	case types.MapType:
+		mapper := res.(traits.Mapper)
+		sz := mapper.Size().(types.Int)
+		entries := make([]*exprpb.MapValue_Entry, 0, int64(sz))
+		for it := mapper.Iterator(); it.HasNext().(types.Bool); {
+			k := it.Next()
+			v := mapper.Get(k)
+			kv, err := RefValueToValue(k)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := RefValueToValue(v)
+			if err != nil {
+				return nil, err
+			}
+			entries = append(entries, &exprpb.MapValue_Entry{Key: kv, Value: vv})
+		}
+		return &exprpb.Value{
+			Kind: &exprpb.Value_MapValue{
+				MapValue: &exprpb.MapValue{Entries: entries}}}, nil
+	case types.NullType:
+		return &exprpb.Value{
+			Kind: &exprpb.Value_NullValue{}}, nil
+	case types.StringType:
+		return &exprpb.Value{
+			Kind: &exprpb.Value_StringValue{StringValue: res.Value().(string)}}, nil
+	case types.TypeType:
+		typeName := res.(ref.Type).TypeName()
+		return &exprpb.Value{Kind: &exprpb.Value_TypeValue{TypeValue: typeName}}, nil
+	case types.UintType:
+		return &exprpb.Value{
+			Kind: &exprpb.Value_Uint64Value{Uint64Value: res.Value().(uint64)}}, nil
+	default:
+		any, err := res.ConvertToNative(anyPbType)
+		if err != nil {
+			return nil, err
+		}
+		return &exprpb.Value{
+			Kind: &exprpb.Value_ObjectValue{ObjectValue: any.(*anypb.Any)}}, nil
+	}
+}
+
+var (
+	typeNameToTypeValue = map[string]ref.Val{
+		"bool":      types.BoolType,
+		"bytes":     types.BytesType,
+		"double":    types.DoubleType,
+		"null_type": types.NullType,
+		"int":       types.IntType,
+		"list":      types.ListType,
+		"map":       types.MapType,
+		"string":    types.StringType,
+		"type": 
types.TypeType, + "uint": types.UintType, + } + + anyPbType = reflect.TypeOf(&anypb.Any{}) +) + +// ValueToRefValue converts between exprpb.Value and ref.Val. +func ValueToRefValue(adapter types.Adapter, v *exprpb.Value) (ref.Val, error) { + switch v.Kind.(type) { + case *exprpb.Value_NullValue: + return types.NullValue, nil + case *exprpb.Value_BoolValue: + return types.Bool(v.GetBoolValue()), nil + case *exprpb.Value_Int64Value: + return types.Int(v.GetInt64Value()), nil + case *exprpb.Value_Uint64Value: + return types.Uint(v.GetUint64Value()), nil + case *exprpb.Value_DoubleValue: + return types.Double(v.GetDoubleValue()), nil + case *exprpb.Value_StringValue: + return types.String(v.GetStringValue()), nil + case *exprpb.Value_BytesValue: + return types.Bytes(v.GetBytesValue()), nil + case *exprpb.Value_ObjectValue: + any := v.GetObjectValue() + msg, err := anypb.UnmarshalNew(any, proto.UnmarshalOptions{DiscardUnknown: true}) + if err != nil { + return nil, err + } + return adapter.NativeToValue(msg), nil + case *exprpb.Value_MapValue: + m := v.GetMapValue() + entries := make(map[ref.Val]ref.Val) + for _, entry := range m.Entries { + key, err := ValueToRefValue(adapter, entry.Key) + if err != nil { + return nil, err + } + pb, err := ValueToRefValue(adapter, entry.Value) + if err != nil { + return nil, err + } + entries[key] = pb + } + return adapter.NativeToValue(entries), nil + case *exprpb.Value_ListValue: + l := v.GetListValue() + elts := make([]ref.Val, len(l.Values)) + for i, e := range l.Values { + rv, err := ValueToRefValue(adapter, e) + if err != nil { + return nil, err + } + elts[i] = rv + } + return adapter.NativeToValue(elts), nil + case *exprpb.Value_TypeValue: + typeName := v.GetTypeValue() + tv, ok := typeNameToTypeValue[typeName] + if ok { + return tv, nil + } + return types.NewObjectTypeValue(typeName), nil + } + return nil, errors.New("unknown value") +} diff --git a/vendor/github.com/google/cel-go/cel/library.go b/vendor/github.com/google/cel-go/cel/library.go new file mode 100644 index 00000000..be59f1b0 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/library.go @@ -0,0 +1,790 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cel + +import ( + "math" + "strconv" + "strings" + "time" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/stdlib" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter" + "github.com/google/cel-go/parser" +) + +const ( + optMapMacro = "optMap" + optFlatMapMacro = "optFlatMap" + hasValueFunc = "hasValue" + optionalNoneFunc = "optional.none" + optionalOfFunc = "optional.of" + optionalOfNonZeroValueFunc = "optional.ofNonZeroValue" + valueFunc = "value" + unusedIterVar = "#unused" +) + +// Library provides a collection of EnvOption and ProgramOption values used to configure a CEL +// environment for a particular use case or with a related set of functionality. +// +// Note, the ProgramOption values provided by a library are expected to be static and not vary +// between calls to Env.Program(). If there is a need for such dynamic configuration, prefer to +// configure these options outside the Library and within the Env.Program() call directly. +type Library interface { + // CompileOptions returns a collection of functional options for configuring the Parse / Check + // environment. + CompileOptions() []EnvOption + + // ProgramOptions returns a collection of functional options which should be included in every + // Program generated from the Env.Program() call. + ProgramOptions() []ProgramOption +} + +// SingletonLibrary refines the Library interface to ensure that libraries in this format are only +// configured once within the environment. +type SingletonLibrary interface { + Library + + // LibraryName provides a namespaced name which is used to check whether the library has already + // been configured in the environment. + LibraryName() string +} + +// Lib creates an EnvOption out of a Library, allowing libraries to be provided as functional args, +// and to be linked to each other. +func Lib(l Library) EnvOption { + singleton, isSingleton := l.(SingletonLibrary) + return func(e *Env) (*Env, error) { + if isSingleton { + if e.HasLibrary(singleton.LibraryName()) { + return e, nil + } + e.libraries[singleton.LibraryName()] = true + } + var err error + for _, opt := range l.CompileOptions() { + e, err = opt(e) + if err != nil { + return nil, err + } + } + e.progOpts = append(e.progOpts, l.ProgramOptions()...) + return e, nil + } +} + +// StdLib returns an EnvOption for the standard library of CEL functions and macros. +func StdLib() EnvOption { + return Lib(stdLibrary{}) +} + +// stdLibrary implements the Library interface and provides functional options for the core CEL +// features documented in the specification. +type stdLibrary struct{} + +// LibraryName implements the SingletonLibrary interface method. +func (stdLibrary) LibraryName() string { + return "cel.lib.std" +} + +// CompileOptions returns options for the standard CEL function declarations and macros. +func (stdLibrary) CompileOptions() []EnvOption { + return []EnvOption{ + func(e *Env) (*Env, error) { + var err error + for _, fn := range stdlib.Functions() { + existing, found := e.functions[fn.Name()] + if found { + fn, err = existing.Merge(fn) + if err != nil { + return nil, err + } + } + e.functions[fn.Name()] = fn + } + return e, nil + }, + func(e *Env) (*Env, error) { + e.variables = append(e.variables, stdlib.Types()...) 
+			return e, nil
+		},
+		Macros(StandardMacros...),
+	}
+}
+
+// ProgramOptions returns function implementations for the standard CEL functions.
+func (stdLibrary) ProgramOptions() []ProgramOption {
+	return []ProgramOption{}
+}
+
+// OptionalTypes enable support for optional syntax and types in CEL.
+//
+// The optional value type makes it possible to express whether variables have
+// been provided, whether a result has been computed, and in the future whether
+// an object field path, map key value, or list index has a value.
+//
+// # Syntax Changes
+//
+// OptionalTypes are unlike other CEL extensions because they modify the CEL
+// syntax itself, notably through the use of a `?` preceding a field name or
+// index value.
+//
+// ## Field Selection
+//
+// The optional syntax in field selection is denoted as `obj.?field`. In other
+// words, if a field is set, return `optional.of(obj.field)`, else
+// `optional.none()`. The optional field selection is viral in the sense that
+// after the first optional selection all subsequent selections or indices
+// are treated as optional, i.e. the following expressions are equivalent:
+//
+//	obj.?field.subfield
+//	obj.?field.?subfield
+//
+// ## Indexing
+//
+// Similar to field selection, the optional syntax can be used in index
+// expressions on maps and lists:
+//
+//	list[?0]
+//	map[?key]
+//
+// ## Optional Field Setting
+//
+// When creating map or message literals, if a field may be optionally set
+// based on its presence, then placing a `?` before the field name or key
+// will ensure the type on the right-hand side must be optional(T) where T
+// is the type of the field or key-value.
+//
+// The following returns a map with the key expression set only if the
+// subfield is present, otherwise an empty map is created:
+//
+//	{?key: obj.?field.subfield}
+//
+// ## Optional Element Setting
+//
+// When creating list literals, an element in the list may be optionally added
+// when the element expression is preceded by a `?`:
+//
+//	[a, ?b, ?c] // return a list with either [a], [a, b], [a, b, c], or [a, c]
+//
+// # Optional.Of
+//
+// Create an optional(T) value of a given value with type T.
+//
+//	optional.of(10)
+//
+// # Optional.OfNonZeroValue
+//
+// Create an optional(T) value of a given value with type T if it is not a
+// zero-value. A zero-value is the default empty value for any given CEL type,
+// including empty protobuf message types. If the value is empty, the result
+// of this call will be optional.none().
+//
+//	optional.ofNonZeroValue([1, 2, 3]) // optional(list(int))
+//	optional.ofNonZeroValue([]) // optional.none()
+//	optional.ofNonZeroValue(0) // optional.none()
+//	optional.ofNonZeroValue("") // optional.none()
+//
+// # Optional.None
+//
+// Create an empty optional value.
+//
+// # HasValue
+//
+// Determine whether the optional contains a value.
+//
+//	optional.of(b'hello').hasValue() // true
+//	optional.ofNonZeroValue({}).hasValue() // false
+//
+// # Value
+//
+// Get the value contained by the optional. If the optional does not have a
+// value, the result will be a CEL error.
+//
+//	optional.of(b'hello').value() // b'hello'
+//	optional.ofNonZeroValue({}).value() // error
+//
+// # Or
+//
+// If the value on the left-hand side is optional.none(), the optional value
+// on the right-hand side is returned. If the value on the left-hand side is
+// valued, then it is returned.
This operation is short-circuiting and will
+// only evaluate as many links in the `or` chain as are needed to return a
+// non-empty optional value.
+//
+//	obj.?field.or(m[?key])
+//	l[?index].or(obj.?field.subfield).or(obj.?other)
+//
+// # OrValue
+//
+// Either return the value contained within the optional on the left-hand side
+// or return the alternative value on the right-hand side.
+//
+//	m[?key].orValue("none")
+//
+// # OptMap
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return an optional typed result based on the transformation. The
+// transformation expression type must return a type T which is wrapped into
+// an optional.
+//
+//	msg.?elements.optMap(e, e.size()).orValue(0)
+//
+// # OptFlatMap
+//
+// Introduced in version: 1
+//
+// Apply a transformation to the optional's underlying value if it is not empty
+// and return the result. The transform expression must return an optional(T)
+// rather than type T. This can be useful when dealing with zero values and
+// conditionally generating an empty or non-empty result in ways which cannot
+// be expressed with `optMap`.
+//
+//	msg.?elements.optFlatMap(e, e[?0]) // return the first element if present.
+func OptionalTypes(opts ...OptionalTypesOption) EnvOption {
+	lib := &optionalLib{version: math.MaxUint32}
+	for _, opt := range opts {
+		lib = opt(lib)
+	}
+	return Lib(lib)
+}
+
+type optionalLib struct {
+	version uint32
+}
+
+// OptionalTypesOption is a functional interface for configuring the optional types library.
+type OptionalTypesOption func(*optionalLib) *optionalLib
+
+// OptionalTypesVersion configures the version of the optional type library.
+//
+// The version limits which functions are available. Only functions introduced
+// at or below the given version are included in the library. If this option
+// is not set, all functions are available.
+//
+// See the library documentation to determine the version in which a function was
+// introduced. If the documentation does not state which version a function was
+// introduced in, it can be assumed to be introduced at version 0, when the
+// library was first created.
+func OptionalTypesVersion(version uint32) OptionalTypesOption {
+	return func(lib *optionalLib) *optionalLib {
+		lib.version = version
+		return lib
+	}
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (lib *optionalLib) LibraryName() string {
+	return "cel.lib.optional"
+}
+
+// CompileOptions implements the Library interface method.
+func (lib *optionalLib) CompileOptions() []EnvOption {
+	paramTypeK := TypeParamType("K")
+	paramTypeV := TypeParamType("V")
+	optionalTypeV := OptionalType(paramTypeV)
+	listTypeV := ListType(paramTypeV)
+	mapTypeKV := MapType(paramTypeK, paramTypeV)
+
+	opts := []EnvOption{
+		// Enable the optional syntax in the parser.
+		enableOptionalSyntax(),
+
+		// Introduce the optional type.
+		Types(types.OptionalType),
+
+		// Configure the optMap macro. The optFlatMap macro is added below for version >= 1.
+		Macros(ReceiverMacro(optMapMacro, 2, optMap)),
+
+		// Global and member functions for working with optional values.
+		Function(optionalOfFunc,
+			Overload("optional_of", []*Type{paramTypeV}, optionalTypeV,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					return types.OptionalOf(value)
+				}))),
+		Function(optionalOfNonZeroValueFunc,
+			Overload("optional_ofNonZeroValue", []*Type{paramTypeV}, optionalTypeV,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					v, isZeroer := value.(traits.Zeroer)
+					if !isZeroer || !v.IsZeroValue() {
+						return types.OptionalOf(value)
+					}
+					return types.OptionalNone
+				}))),
+		Function(optionalNoneFunc,
+			Overload("optional_none", []*Type{}, optionalTypeV,
+				FunctionBinding(func(values ...ref.Val) ref.Val {
+					return types.OptionalNone
+				}))),
+		Function(valueFunc,
+			MemberOverload("optional_value", []*Type{optionalTypeV}, paramTypeV,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					opt := value.(*types.Optional)
+					return opt.GetValue()
+				}))),
+		Function(hasValueFunc,
+			MemberOverload("optional_hasValue", []*Type{optionalTypeV}, BoolType,
+				UnaryBinding(func(value ref.Val) ref.Val {
+					opt := value.(*types.Optional)
+					return types.Bool(opt.HasValue())
+				}))),
+
+		// Implementations of 'or' and 'orValue' are special-cased to support short-circuiting in the
+		// evaluation chain.
+		Function("or",
+			MemberOverload("optional_or_optional", []*Type{optionalTypeV, optionalTypeV}, optionalTypeV)),
+		Function("orValue",
+			MemberOverload("optional_orValue_value", []*Type{optionalTypeV, paramTypeV}, paramTypeV)),
+
+		// OptSelect is handled specially by the type-checker, so the receiver's field type is used to determine the
+		// output type.
+		Function(operators.OptSelect,
+			Overload("select_optional_field", []*Type{DynType, StringType}, optionalTypeV)),
+
+		// OptIndex is handled mostly like any other indexing operation on a list or map, so the type-checker can use
+		// these signatures to determine type-agreement without any special handling.
+		Function(operators.OptIndex,
+			Overload("list_optindex_optional_int", []*Type{listTypeV, IntType}, optionalTypeV),
+			Overload("optional_list_optindex_optional_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+			Overload("map_optindex_optional_value", []*Type{mapTypeKV, paramTypeK}, optionalTypeV),
+			Overload("optional_map_optindex_optional_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+
+		// Index overloads to accommodate using an optional value as the operand.
+		Function(operators.Index,
+			Overload("optional_list_index_int", []*Type{OptionalType(listTypeV), IntType}, optionalTypeV),
+			Overload("optional_map_index_value", []*Type{OptionalType(mapTypeKV), paramTypeK}, optionalTypeV)),
+	}
+	if lib.version >= 1 {
+		opts = append(opts, Macros(ReceiverMacro(optFlatMapMacro, 2, optFlatMap)))
+	}
+	return opts
+}
+
+// ProgramOptions implements the Library interface method.
+func (lib *optionalLib) ProgramOptions() []ProgramOption { + return []ProgramOption{ + CustomDecorator(decorateOptionalOr), + } +} + +func optMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { + varIdent := args[0] + varName := "" + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() + default: + return nil, meh.NewError(varIdent.ID(), "optMap() variable name must be a simple identifier") + } + mapExpr := args[1] + return meh.NewCall( + operators.Conditional, + meh.NewMemberCall(hasValueFunc, target), + meh.NewCall(optionalOfFunc, + meh.NewComprehension( + meh.NewList(), + unusedIterVar, + varName, + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), + mapExpr, + ), + ), + meh.NewCall(optionalNoneFunc), + ), nil +} + +func optFlatMap(meh MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) { + varIdent := args[0] + varName := "" + switch varIdent.Kind() { + case ast.IdentKind: + varName = varIdent.AsIdent() + default: + return nil, meh.NewError(varIdent.ID(), "optFlatMap() variable name must be a simple identifier") + } + mapExpr := args[1] + return meh.NewCall( + operators.Conditional, + meh.NewMemberCall(hasValueFunc, target), + meh.NewComprehension( + meh.NewList(), + unusedIterVar, + varName, + meh.NewMemberCall(valueFunc, meh.Copy(target)), + meh.NewLiteral(types.False), + meh.NewIdent(varName), + mapExpr, + ), + meh.NewCall(optionalNoneFunc), + ), nil +} + +func enableOptionalSyntax() EnvOption { + return func(e *Env) (*Env, error) { + e.prsrOpts = append(e.prsrOpts, parser.EnableOptionalSyntax(true)) + return e, nil + } +} + +// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field +// selection is performed on a primitive type. +func EnableErrorOnBadPresenceTest(value bool) EnvOption { + return features(featureEnableErrorOnBadPresenceTest, value) +} + +func decorateOptionalOr(i interpreter.Interpretable) (interpreter.Interpretable, error) { + call, ok := i.(interpreter.InterpretableCall) + if !ok { + return i, nil + } + args := call.Args() + if len(args) != 2 { + return i, nil + } + switch call.Function() { + case "or": + if call.OverloadID() != "" && call.OverloadID() != "optional_or_optional" { + return i, nil + } + return &evalOptionalOr{ + id: call.ID(), + lhs: args[0], + rhs: args[1], + }, nil + case "orValue": + if call.OverloadID() != "" && call.OverloadID() != "optional_orValue_value" { + return i, nil + } + return &evalOptionalOrValue{ + id: call.ID(), + lhs: args[0], + rhs: args[1], + }, nil + default: + return i, nil + } +} + +// evalOptionalOr selects between two optional values, either the first if it has a value, or +// the second optional expression is evaluated and returned. +type evalOptionalOr struct { + id int64 + lhs interpreter.Interpretable + rhs interpreter.Interpretable +} + +// ID implements the Interpretable interface method. +func (opt *evalOptionalOr) ID() int64 { + return opt.id +} + +// Eval evaluates the left-hand side optional to determine whether it contains a value, else +// proceeds with the right-hand side evaluation. +func (opt *evalOptionalOr) Eval(ctx interpreter.Activation) ref.Val { + // short-circuit lhs. + optLHS := opt.lhs.Eval(ctx) + optVal, ok := optLHS.(*types.Optional) + if !ok { + return optLHS + } + if optVal.HasValue() { + return optVal + } + return opt.rhs.Eval(ctx) +} + +// evalOptionalOrValue selects between an optional or a concrete value. 
If the optional has a value, +// its value is returned, otherwise the alternative value expression is evaluated and returned. +type evalOptionalOrValue struct { + id int64 + lhs interpreter.Interpretable + rhs interpreter.Interpretable +} + +// ID implements the Interpretable interface method. +func (opt *evalOptionalOrValue) ID() int64 { + return opt.id +} + +// Eval evaluates the left-hand side optional to determine whether it contains a value, else +// proceeds with the right-hand side evaluation. +func (opt *evalOptionalOrValue) Eval(ctx interpreter.Activation) ref.Val { + // short-circuit lhs. + optLHS := opt.lhs.Eval(ctx) + optVal, ok := optLHS.(*types.Optional) + if !ok { + return optLHS + } + if optVal.HasValue() { + return optVal.GetValue() + } + return opt.rhs.Eval(ctx) +} + +type timeUTCLibrary struct{} + +func (timeUTCLibrary) CompileOptions() []EnvOption { + return timeOverloadDeclarations +} + +func (timeUTCLibrary) ProgramOptions() []ProgramOption { + return []ProgramOption{} +} + +// Declarations and functions which enable using UTC on time.Time inputs when the timezone is unspecified +// in the CEL expression. +var ( + utcTZ = types.String("UTC") + + timeOverloadDeclarations = []EnvOption{ + Function(overloads.TimeGetHours, + MemberOverload(overloads.DurationToHours, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetHours))), + Function(overloads.TimeGetMinutes, + MemberOverload(overloads.DurationToMinutes, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetMinutes))), + Function(overloads.TimeGetSeconds, + MemberOverload(overloads.DurationToSeconds, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetSeconds))), + Function(overloads.TimeGetMilliseconds, + MemberOverload(overloads.DurationToMilliseconds, []*Type{DurationType}, IntType, + UnaryBinding(types.DurationGetMilliseconds))), + Function(overloads.TimeGetFullYear, + MemberOverload(overloads.TimestampToYear, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetFullYear(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToYearWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetFullYear), + ), + ), + Function(overloads.TimeGetMonth, + MemberOverload(overloads.TimestampToMonth, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetMonth(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToMonthWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetMonth), + ), + ), + Function(overloads.TimeGetDayOfYear, + MemberOverload(overloads.TimestampToDayOfYear, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetDayOfYear(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfYearWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(func(ts, tz ref.Val) ref.Val { + return timestampGetDayOfYear(ts, tz) + }), + ), + ), + Function(overloads.TimeGetDayOfMonth, + MemberOverload(overloads.TimestampToDayOfMonthZeroBased, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetDayOfMonthZeroBased(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetDayOfMonthZeroBased), + ), + ), + Function(overloads.TimeGetDate, + MemberOverload(overloads.TimestampToDayOfMonthOneBased, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) 
ref.Val { + return timestampGetDayOfMonthOneBased(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetDayOfMonthOneBased), + ), + ), + Function(overloads.TimeGetDayOfWeek, + MemberOverload(overloads.TimestampToDayOfWeek, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetDayOfWeek(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToDayOfWeekWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetDayOfWeek), + ), + ), + Function(overloads.TimeGetHours, + MemberOverload(overloads.TimestampToHours, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetHours(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToHoursWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetHours), + ), + ), + Function(overloads.TimeGetMinutes, + MemberOverload(overloads.TimestampToMinutes, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetMinutes(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToMinutesWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetMinutes), + ), + ), + Function(overloads.TimeGetSeconds, + MemberOverload(overloads.TimestampToSeconds, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetSeconds(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToSecondsWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetSeconds), + ), + ), + Function(overloads.TimeGetMilliseconds, + MemberOverload(overloads.TimestampToMilliseconds, []*Type{TimestampType}, IntType, + UnaryBinding(func(ts ref.Val) ref.Val { + return timestampGetMilliseconds(ts, utcTZ) + }), + ), + MemberOverload(overloads.TimestampToMillisecondsWithTz, []*Type{TimestampType, StringType}, IntType, + BinaryBinding(timestampGetMilliseconds), + ), + ), + } +) + +func timestampGetFullYear(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Year()) +} + +func timestampGetMonth(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + // CEL spec indicates that the month should be 0-based, but the Time value + // for Month() is 1-based. 
+ return types.Int(t.Month() - 1) +} + +func timestampGetDayOfYear(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.YearDay() - 1) +} + +func timestampGetDayOfMonthZeroBased(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Day() - 1) +} + +func timestampGetDayOfMonthOneBased(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Day()) +} + +func timestampGetDayOfWeek(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Weekday()) +} + +func timestampGetHours(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Hour()) +} + +func timestampGetMinutes(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Minute()) +} + +func timestampGetSeconds(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Second()) +} + +func timestampGetMilliseconds(ts, tz ref.Val) ref.Val { + t, err := inTimeZone(ts, tz) + if err != nil { + return types.NewErr(err.Error()) + } + return types.Int(t.Nanosecond() / 1000000) +} + +func inTimeZone(ts, tz ref.Val) (time.Time, error) { + t := ts.(types.Timestamp) + val := string(tz.(types.String)) + ind := strings.Index(val, ":") + if ind == -1 { + loc, err := time.LoadLocation(val) + if err != nil { + return time.Time{}, err + } + return t.In(loc), nil + } + + // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC + // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes. + hr, err := strconv.Atoi(string(val[0:ind])) + if err != nil { + return time.Time{}, err + } + min, err := strconv.Atoi(string(val[ind+1:])) + if err != nil { + return time.Time{}, err + } + var offset int + if string(val[0]) == "-" { + offset = hr*60 - min + } else { + offset = hr*60 + min + } + secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds()) + timezone := time.FixedZone("", secondsEastOfUTC) + return t.In(timezone), nil +} diff --git a/vendor/github.com/google/cel-go/cel/macro.go b/vendor/github.com/google/cel-go/cel/macro.go new file mode 100644 index 00000000..4db1fd57 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/macro.go @@ -0,0 +1,576 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package cel
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/parser"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Macro describes a function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count or as a var arg macro.
+type Macro = parser.Macro
+
+// MacroFactory defines an expansion function which converts a call and its arguments to a cel.Expr value.
+type MacroFactory = parser.MacroExpander
+
+// MacroExprFactory assists with the creation of Expr values in a manner which is consistent with
+// the internal semantics and id generation behaviors of the parser and checker libraries.
+type MacroExprFactory = parser.ExprHelper
+
+// MacroExpander converts a call and its associated arguments into a protobuf Expr representation.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments a MacroExprHelper as well as the arguments used in the function call
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
+type MacroExpander func(eh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error)
+
+// MacroExprHelper exposes helper methods for creating new expressions within a CEL abstract syntax tree.
+// ExprHelper assists with the manipulation of proto-based Expr values in a manner which is
+// consistent with the source position and expression id generation code leveraged by both
+// the parser and type-checker.
+type MacroExprHelper interface {
+	// Copy the input expression with a brand new set of identifiers.
+	Copy(*exprpb.Expr) *exprpb.Expr
+
+	// LiteralBool creates an Expr value for a bool literal.
+	LiteralBool(value bool) *exprpb.Expr
+
+	// LiteralBytes creates an Expr value for a byte literal.
+	LiteralBytes(value []byte) *exprpb.Expr
+
+	// LiteralDouble creates an Expr value for a double literal.
+	LiteralDouble(value float64) *exprpb.Expr
+
+	// LiteralInt creates an Expr value for an int literal.
+	LiteralInt(value int64) *exprpb.Expr
+
+	// LiteralString creates an Expr value for a string literal.
+	LiteralString(value string) *exprpb.Expr
+
+	// LiteralUint creates an Expr value for a uint literal.
+	LiteralUint(value uint64) *exprpb.Expr
+
+	// NewList creates a CreateList instruction where the list is comprised of the optional set
+	// of elements provided as arguments.
+	NewList(elems ...*exprpb.Expr) *exprpb.Expr
+
+	// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+	// optional set of key, value entries.
+	NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+	// NewMapEntry creates a Map Entry for the key, value pair.
+	NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+	// NewObject creates a CreateStruct instruction for an object with a given type name and
+	// optional set of field initializers.
+	NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr
+
+	// NewObjectFieldInit creates a new Object field initializer from the field name and value.
+	NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry
+
+	// Fold creates a fold comprehension instruction.
+	//
+	// - iterVar is the iteration variable name.
+	// - iterRange represents the expression that resolves to a list or map where the elements or
+	//   keys (respectively) will be iterated over.
+	// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+	// - accuInit is the initial expression whose value will be set for the accuVar prior to
+	//   folding.
+	// - condition is the expression to test to determine whether to continue folding.
+	// - step is the expression to evaluate at the conclusion of a single fold iteration.
+	// - result is the computation to evaluate at the conclusion of the fold.
+	//
+	// The accuVar should not shadow variable names that you would like to reference within the
+	// environment in the step and condition expressions. Presently, the name __result__ is commonly
+	// used by built-in macros but this may change in the future.
+	Fold(iterVar string,
+		iterRange *exprpb.Expr,
+		accuVar string,
+		accuInit *exprpb.Expr,
+		condition *exprpb.Expr,
+		step *exprpb.Expr,
+		result *exprpb.Expr) *exprpb.Expr
+
+	// Ident creates an identifier Expr value.
+	Ident(name string) *exprpb.Expr
+
+	// AccuIdent returns an accumulator identifier for use with comprehension results.
+	AccuIdent() *exprpb.Expr
+
+	// GlobalCall creates a function call Expr value for a global (free) function.
+	GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr
+
+	// ReceiverCall creates a function call Expr value for a receiver-style function.
+	ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr
+
+	// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+	PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr
+
+	// Select creates a field traversal Expr value.
+	Select(operand *exprpb.Expr, field string) *exprpb.Expr
+
+	// OffsetLocation returns the Location of the expression identifier.
+	OffsetLocation(exprID int64) common.Location
+
+	// NewError associates an error message with a given expression id.
+	NewError(exprID int64, message string) *Error
+}
+
+// GlobalMacro creates a Macro for a global function with the specified arg count.
+func GlobalMacro(function string, argCount int, factory MacroFactory) Macro {
+	return parser.NewGlobalMacro(function, argCount, factory)
+}
+
+// ReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func ReceiverMacro(function string, argCount int, factory MacroFactory) Macro {
+	return parser.NewReceiverMacro(function, argCount, factory)
+}
+
+// GlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func GlobalVarArgMacro(function string, factory MacroFactory) Macro {
+	return parser.NewGlobalVarArgMacro(function, factory)
+}
+
+// ReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func ReceiverVarArgMacro(function string, factory MacroFactory) Macro {
+	return parser.NewReceiverVarArgMacro(function, factory)
+}
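+
+// A minimal sketch (hypothetical names, not part of the vendored file) of a
+// custom receiver macro declared via ReceiverMacro: it rewrites x.isEmpty()
+// into size(x) == 0 at parse time. "_==_" is the internal name of the
+// equality operator.
+func isEmptyMacroFactory(mef MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *Error) {
+	return mef.NewCall("_==_",
+		mef.NewCall("size", target),
+		mef.NewLiteral(types.Int(0))), nil
+}
+
+// The macro would then be installed with the Macros EnvOption, e.g.
+// NewEnv(Macros(ReceiverMacro("isEmpty", 0, isEmptyMacroFactory))).
+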
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+//
+// Deprecated: use GlobalMacro
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewGlobalMacro(function, argCount, expand.Expander)
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+//
+// Deprecated: use ReceiverMacro
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewReceiverMacro(function, argCount, expand.Expander)
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+//
+// Deprecated: use GlobalVarArgMacro
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewGlobalVarArgMacro(function, expand.Expander)
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+//
+// Deprecated: use ReceiverVarArgMacro
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+	expand := adaptingExpander{expander}
+	return parser.NewReceiverVarArgMacro(function, expand.Expander)
+}
+
+// HasMacroExpander expands the input call arguments into a presence test, e.g. has(<operand>.field)
+func HasMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	arg, err := adaptToExpr(args[0])
+	if err != nil {
+		return nil, err
+	}
+	if arg.Kind() == ast.SelectKind {
+		s := arg.AsSelect()
+		return adaptToProto(ph.NewPresenceTest(s.Operand(), s.FieldName()))
+	}
+	return nil, ph.NewError(arg.ID(), "invalid argument to has() macro")
+}
+
+// ExistsMacroExpander expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expression:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func ExistsMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeExists(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+// ExistsOneMacroExpander expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range matches the predicate expression:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
+func ExistsOneMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeExistsOne(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+// MapMacroExpander expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+//	<iterRange>.map(<iterVar>, <transform>)
+//	<iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form only iterVar values which return true when provided to the predicate expression
+// are transformed.
+func MapMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeMap(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+// FilterMacroExpander expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
+func FilterMacroExpander(meh MacroExprHelper, target *exprpb.Expr, args []*exprpb.Expr) (*exprpb.Expr, *Error) {
+	ph, err := toParserHelper(meh)
+	if err != nil {
+		return nil, err
+	}
+	out, err := parser.MakeFilter(ph, mustAdaptToExpr(target), mustAdaptToExprs(args))
+	if err != nil {
+		return nil, err
+	}
+	return adaptToProto(out)
+}
+
+var (
+	// Aliases to each macro in the CEL standard environment.
+	// Note: reassigning these macro variables may result in undefined behavior.
+
+	// HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+	// specify the field as a string.
+	HasMacro = parser.HasMacro
+
+	// AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+	// elements in the range satisfy the predicate.
+	AllMacro = parser.AllMacro
+
+	// ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+	// some element in the range satisfies the predicate.
+	ExistsMacro = parser.ExistsMacro
+
+	// ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+	// element in range the predicate holds.
+	ExistsOneMacro = parser.ExistsOneMacro
+
+	// MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+	// to each element in the range to produce a new list.
+	MapMacro = parser.MapMacro
+
+	// MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+	// first filters the elements in the range by the predicate, then applies the transform function
+	// to produce a new list.
+	MapFilterMacro = parser.MapFilterMacro
+
+	// FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+	// elements in the range, producing a new list from the elements that satisfy the predicate.
+	FilterMacro = parser.FilterMacro
+
+	// StandardMacros provides an alias to all the CEL macros defined in the standard environment.
+	StandardMacros = []Macro{
+		HasMacro, AllMacro, ExistsMacro, ExistsOneMacro, MapMacro, MapFilterMacro, FilterMacro,
+	}
+
+	// NoMacros provides an alias to an empty list of macros.
+	NoMacros = []Macro{}
+)
+
+type adaptingExpander struct {
+	legacyExpander MacroExpander
+}
+
+func (adapt *adaptingExpander) Expander(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	var legacyTarget *exprpb.Expr = nil
+	var err *Error = nil
+	if target != nil {
+		legacyTarget, err = adaptToProto(target)
+		if err != nil {
+			return nil, err
+		}
+	}
+	legacyArgs := make([]*exprpb.Expr, len(args))
+	for i, arg := range args {
+		legacyArgs[i], err = adaptToProto(arg)
+		if err != nil {
+			return nil, err
+		}
+	}
+	ah := &adaptingHelper{modernHelper: eh}
+	legacyExpr, err := adapt.legacyExpander(ah, legacyTarget, legacyArgs)
+	if err != nil {
+		return nil, err
+	}
+	ex, err := adaptToExpr(legacyExpr)
+	if err != nil {
+		return nil, err
+	}
+	return ex, nil
+}
+
+func wrapErr(id int64, message string, err error) *common.Error {
+	return &common.Error{
+		Location: common.NoLocation,
+		Message:  fmt.Sprintf("%s: %v", message, err),
+		ExprID:   id,
+	}
+}
+
+type adaptingHelper struct {
+	modernHelper parser.ExprHelper
+}
+
+// Copy the input expression with a brand new set of identifiers.
+func (ah *adaptingHelper) Copy(e *exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.Copy(mustAdaptToExpr(e)))
+}
+
+// LiteralBool creates an Expr value for a bool literal.
+func (ah *adaptingHelper) LiteralBool(value bool) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bool(value)))
+}
+
+// LiteralBytes creates an Expr value for a byte literal.
+func (ah *adaptingHelper) LiteralBytes(value []byte) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Bytes(value)))
+}
+
+// LiteralDouble creates an Expr value for a double literal.
+func (ah *adaptingHelper) LiteralDouble(value float64) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Double(value)))
+}
+
+// LiteralInt creates an Expr value for an int literal.
+func (ah *adaptingHelper) LiteralInt(value int64) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Int(value)))
+}
+
+// LiteralString creates an Expr value for a string literal.
+func (ah *adaptingHelper) LiteralString(value string) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.String(value)))
+}
+
+// LiteralUint creates an Expr value for a uint literal.
+func (ah *adaptingHelper) LiteralUint(value uint64) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewLiteral(types.Uint(value)))
+}
+
+// NewList creates a CreateList instruction where the list is comprised of the optional set
+// of elements provided as arguments.
+func (ah *adaptingHelper) NewList(elems ...*exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewList(mustAdaptToExprs(elems)...))
+}
+
+// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+// optional set of key, value entries.
+func (ah *adaptingHelper) NewMap(entries ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+	adaptedEntries := make([]ast.EntryExpr, len(entries))
+	for i, e := range entries {
+		adaptedEntries[i] = mustAdaptToEntryExpr(e)
+	}
+	return mustAdaptToProto(ah.modernHelper.NewMap(adaptedEntries...))
+}
+
+// NewMapEntry creates a Map Entry for the key, value pair.
+func (ah *adaptingHelper) NewMapEntry(key *exprpb.Expr, val *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+	return mustAdaptToProtoEntry(
+		ah.modernHelper.NewMapEntry(mustAdaptToExpr(key), mustAdaptToExpr(val), optional))
+}
+
+// NewObject creates a CreateStruct instruction for an object with a given type name and
+// optional set of field initializers.
+func (ah *adaptingHelper) NewObject(typeName string, fieldInits ...*exprpb.Expr_CreateStruct_Entry) *exprpb.Expr {
+	adaptedEntries := make([]ast.EntryExpr, len(fieldInits))
+	for i, e := range fieldInits {
+		adaptedEntries[i] = mustAdaptToEntryExpr(e)
+	}
+	return mustAdaptToProto(ah.modernHelper.NewStruct(typeName, adaptedEntries...))
+}
+
+// NewObjectFieldInit creates a new Object field initializer from the field name and value.
+func (ah *adaptingHelper) NewObjectFieldInit(field string, init *exprpb.Expr, optional bool) *exprpb.Expr_CreateStruct_Entry {
+	return mustAdaptToProtoEntry(
+		ah.modernHelper.NewStructField(field, mustAdaptToExpr(init), optional))
+}
+
+// Fold creates a fold comprehension instruction.
+//
+// - iterVar is the iteration variable name.
+// - iterRange represents the expression that resolves to a list or map where the elements or
+//   keys (respectively) will be iterated over.
+// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+// - accuInit is the initial expression whose value will be set for the accuVar prior to
+//   folding.
+// - condition is the expression to test to determine whether to continue folding.
+// - step is the expression to evaluate at the conclusion of a single fold iteration.
+// - result is the computation to evaluate at the conclusion of the fold.
+//
+// The accuVar should not shadow variable names that you would like to reference within the
+// environment in the step and condition expressions. Presently, the name __result__ is commonly
+// used by built-in macros but this may change in the future.
+func (ah *adaptingHelper) Fold(iterVar string,
+	iterRange *exprpb.Expr,
+	accuVar string,
+	accuInit *exprpb.Expr,
+	condition *exprpb.Expr,
+	step *exprpb.Expr,
+	result *exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(
+		ah.modernHelper.NewComprehension(
+			mustAdaptToExpr(iterRange),
+			iterVar,
+			accuVar,
+			mustAdaptToExpr(accuInit),
+			mustAdaptToExpr(condition),
+			mustAdaptToExpr(step),
+			mustAdaptToExpr(result),
+		),
+	)
+}
+
+// Ident creates an identifier Expr value.
+func (ah *adaptingHelper) Ident(name string) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewIdent(name))
+}
+
+// AccuIdent returns an accumulator identifier for use with comprehension results.
+func (ah *adaptingHelper) AccuIdent() *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewAccuIdent())
+}
+
+// GlobalCall creates a function call Expr value for a global (free) function.
+func (ah *adaptingHelper) GlobalCall(function string, args ...*exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(ah.modernHelper.NewCall(function, mustAdaptToExprs(args)...))
+}
+
+// ReceiverCall creates a function call Expr value for a receiver-style function.
+func (ah *adaptingHelper) ReceiverCall(function string, target *exprpb.Expr, args ...*exprpb.Expr) *exprpb.Expr {
+	return mustAdaptToProto(
+		ah.modernHelper.NewMemberCall(function, mustAdaptToExpr(target), mustAdaptToExprs(args)...))
+}
+
+// PresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+func (ah *adaptingHelper) PresenceTest(operand *exprpb.Expr, field string) *exprpb.Expr {
+	op := mustAdaptToExpr(operand)
+	return mustAdaptToProto(ah.modernHelper.NewPresenceTest(op, field))
+}
+
+// Select creates a field traversal Expr value.
+func (ah *adaptingHelper) Select(operand *exprpb.Expr, field string) *exprpb.Expr {
+	op := mustAdaptToExpr(operand)
+	return mustAdaptToProto(ah.modernHelper.NewSelect(op, field))
+}
+
+// OffsetLocation returns the Location of the expression identifier.
+func (ah *adaptingHelper) OffsetLocation(exprID int64) common.Location {
+	return ah.modernHelper.OffsetLocation(exprID)
+}
+
+// NewError associates an error message with a given expression id.
+func (ah *adaptingHelper) NewError(exprID int64, message string) *Error {
+	return ah.modernHelper.NewError(exprID, message)
+}
+
+func mustAdaptToExprs(exprs []*exprpb.Expr) []ast.Expr {
+	adapted := make([]ast.Expr, len(exprs))
+	for i, e := range exprs {
+		adapted[i] = mustAdaptToExpr(e)
+	}
+	return adapted
+}
+
+func mustAdaptToExpr(e *exprpb.Expr) ast.Expr {
+	out, _ := adaptToExpr(e)
+	return out
+}
+
+func adaptToExpr(e *exprpb.Expr) (ast.Expr, *Error) {
+	if e == nil {
+		return nil, nil
+	}
+	out, err := ast.ProtoToExpr(e)
+	if err != nil {
+		return nil, wrapErr(e.GetId(), "proto conversion failure", err)
+	}
+	return out, nil
+}
+
+func mustAdaptToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) ast.EntryExpr {
+	out, _ := ast.ProtoToEntryExpr(e)
+	return out
+}
+
+func mustAdaptToProto(e ast.Expr) *exprpb.Expr {
+	out, _ := adaptToProto(e)
+	return out
+}
+
+func adaptToProto(e ast.Expr) (*exprpb.Expr, *Error) {
+	if e == nil {
+		return nil, nil
+	}
+	out, err := ast.ExprToProto(e)
+	if err != nil {
+		return nil, wrapErr(e.ID(), "expr conversion failure", err)
+	}
+	return out, nil
+}
+
+func mustAdaptToProtoEntry(e ast.EntryExpr) *exprpb.Expr_CreateStruct_Entry {
+	out, _ := ast.EntryExprToProto(e)
+	return out
+}
+
+func toParserHelper(meh MacroExprHelper) (parser.ExprHelper, *Error) {
+	ah, ok := meh.(*adaptingHelper)
+	if !ok {
+		return nil, common.NewError(0,
+			fmt.Sprintf("unsupported macro helper: %v (%T)", meh, meh),
+			common.NoLocation)
+	}
+	return ah.modernHelper, nil
+}
diff --git a/vendor/github.com/google/cel-go/cel/optimizer.go b/vendor/github.com/google/cel-go/cel/optimizer.go
new file mode 100644
index 00000000..ec02773a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/optimizer.go
@@ -0,0 +1,525 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+	"sort"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// StaticOptimizer contains a sequence of ASTOptimizer instances which will be applied in order.
+//
+// The static optimizer normalizes expression ids and re-runs type-checking between optimization
+// passes to ensure that the final optimized output is a valid expression with metadata consistent
+// with what would have been generated from a parsed and checked expression.
+//
+// Note: source position information is best-effort and likely wrong, but optimized expressions
+// should be suitable for calls to parser.Unparse.
+type StaticOptimizer struct {
+	optimizers []ASTOptimizer
+}
+
+// NewStaticOptimizer creates a StaticOptimizer with a sequence of ASTOptimizer instances to be
+// applied to a checked expression.
+func NewStaticOptimizer(optimizers ...ASTOptimizer) *StaticOptimizer {
+	return &StaticOptimizer{
+		optimizers: optimizers,
+	}
+}
+
+// Optimize applies a sequence of optimizations to an Ast within a given environment.
+//
+// If issues are encountered, the Issues.Err() return value will be non-nil.
+func (opt *StaticOptimizer) Optimize(env *Env, a *Ast) (*Ast, *Issues) {
+	// Make a copy of the AST to be optimized.
+	optimized := ast.Copy(a.impl)
+	ids := newIDGenerator(ast.MaxID(a.impl))
+
+	// Create the optimizer context, could be pooled in the future.
+	issues := NewIssues(common.NewErrors(a.Source()))
+	baseFac := ast.NewExprFactory()
+	exprFac := &optimizerExprFactory{
+		idGenerator: ids,
+		fac:         baseFac,
+		sourceInfo:  optimized.SourceInfo(),
+	}
+	ctx := &OptimizerContext{
+		optimizerExprFactory: exprFac,
+		Env:                  env,
+		Issues:               issues,
+	}
+
+	// Apply the optimizations sequentially.
+	for _, o := range opt.optimizers {
+		optimized = o.Optimize(ctx, optimized)
+		if issues.Err() != nil {
+			return nil, issues
+		}
+		// Normalize expression id metadata including coordination with macro call metadata.
+		freshIDGen := newIDGenerator(0)
+		info := optimized.SourceInfo()
+		expr := optimized.Expr()
+		normalizeIDs(freshIDGen.renumberStable, expr, info)
+		cleanupMacroRefs(expr, info)
+
+		// Recheck the updated expression for any possible type-agreement or validation errors.
+		parsed := &Ast{
+			source: a.Source(),
+			impl:   ast.NewAST(expr, info)}
+		checked, iss := ctx.Check(parsed)
+		if iss.Err() != nil {
+			return nil, iss
+		}
+		optimized = checked.impl
+	}
+
+	// Return the optimized result.
+	return &Ast{
+		source: a.Source(),
+		impl:   optimized,
+	}, nil
+}
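+
+// A minimal sketch (the names below are assumptions, not part of the vendored
+// file) of driving the static optimizer with a custom pass. A no-op pass is a
+// valid ASTOptimizer; real passes rewrite the AST via the OptimizerContext.
+type exampleNoopPass struct{}
+
+func (exampleNoopPass) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+	return a
+}
+
+func exampleOptimize(env *Env, checked *Ast) (*Ast, error) {
+	optimized, iss := NewStaticOptimizer(exampleNoopPass{}).Optimize(env, checked)
+	if iss != nil && iss.Err() != nil {
+		return nil, iss.Err()
+	}
+	return optimized, nil
+}
+
+// normalizeIDs ensures that the metadata present with an AST is reset in a manner such
+// that the ids within the expression correspond to the ids within macros.
+func normalizeIDs(idGen ast.IDGenerator, optimized ast.Expr, info *ast.SourceInfo) {
+	optimized.RenumberIDs(idGen)
+	if len(info.MacroCalls()) == 0 {
+		return
+	}
+
+	// Sort the macro ids to make sure that the renumbering of macro-specific variables
+	// is stable across normalization calls.
+	sortedMacroIDs := []int64{}
+	for id := range info.MacroCalls() {
+		sortedMacroIDs = append(sortedMacroIDs, id)
+	}
+	sort.Slice(sortedMacroIDs, func(i, j int) bool { return sortedMacroIDs[i] < sortedMacroIDs[j] })
+
+	// First, update the macro call ids themselves.
+	callIDMap := map[int64]int64{}
+	for _, id := range sortedMacroIDs {
+		callIDMap[id] = idGen(id)
+	}
+	// Then update the macro call definitions which refer to these ids, but
+	// ensure that the updates don't collide and remove macro entries which haven't
+	// been visited / updated yet.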
+ type macroUpdate struct { + id int64 + call ast.Expr + } + macroUpdates := []macroUpdate{} + for _, oldID := range sortedMacroIDs { + newID := callIDMap[oldID] + call, found := info.GetMacroCall(oldID) + if !found { + continue + } + call.RenumberIDs(idGen) + macroUpdates = append(macroUpdates, macroUpdate{id: newID, call: call}) + info.ClearMacroCall(oldID) + } + for _, u := range macroUpdates { + info.SetMacroCall(u.id, u.call) + } +} + +func cleanupMacroRefs(expr ast.Expr, info *ast.SourceInfo) { + if len(info.MacroCalls()) == 0 { + return + } + + // Sanitize the macro call references once the optimized expression has been computed + // and the ids normalized between the expression and the macros. + exprRefMap := make(map[int64]struct{}) + ast.PostOrderVisit(expr, ast.NewExprVisitor(func(e ast.Expr) { + if e.ID() == 0 { + return + } + exprRefMap[e.ID()] = struct{}{} + })) + // Update the macro call id references to ensure that macro pointers are + // updated consistently across macros. + for _, call := range info.MacroCalls() { + ast.PostOrderVisit(call, ast.NewExprVisitor(func(e ast.Expr) { + if e.ID() == 0 { + return + } + exprRefMap[e.ID()] = struct{}{} + })) + } + for id := range info.MacroCalls() { + if _, found := exprRefMap[id]; !found { + info.ClearMacroCall(id) + } + } +} + +// newIDGenerator ensures that new ids are only created the first time they are encountered. +func newIDGenerator(seed int64) *idGenerator { + return &idGenerator{ + idMap: make(map[int64]int64), + seed: seed, + } +} + +type idGenerator struct { + idMap map[int64]int64 + seed int64 +} + +func (gen *idGenerator) nextID() int64 { + gen.seed++ + return gen.seed +} + +func (gen *idGenerator) renumberStable(id int64) int64 { + if id == 0 { + return 0 + } + if newID, found := gen.idMap[id]; found { + return newID + } + nextID := gen.nextID() + gen.idMap[id] = nextID + return nextID +} + +// OptimizerContext embeds Env and Issues instances to make it easy to type-check and evaluate +// subexpressions and report any errors encountered along the way. The context also embeds the +// optimizerExprFactory which can be used to generate new sub-expressions with expression ids +// consistent with the expectations of a parsed expression. +type OptimizerContext struct { + *Env + *optimizerExprFactory + *Issues +} + +// ASTOptimizer applies an optimization over an AST and returns the optimized result. +type ASTOptimizer interface { + // Optimize optimizes a type-checked AST within an Environment and accumulates any issues. + Optimize(*OptimizerContext, *ast.AST) *ast.AST +} + +type optimizerExprFactory struct { + *idGenerator + fac ast.ExprFactory + sourceInfo *ast.SourceInfo +} + +// NewAST creates an AST from the current expression using the tracked source info which +// is modified and managed by the OptimizerContext. +func (opt *optimizerExprFactory) NewAST(expr ast.Expr) *ast.AST { + return ast.NewAST(expr, opt.sourceInfo) +} + +// CopyAST creates a renumbered copy of `Expr` and `SourceInfo` values of the input AST, where the +// renumbering uses the same scheme as the core optimizer logic ensuring there are no collisions +// between copies. +// +// Use this method before attempting to merge the expression from AST into another. 
+func (opt *optimizerExprFactory) CopyAST(a *ast.AST) (ast.Expr, *ast.SourceInfo) { + idGen := newIDGenerator(opt.nextID()) + defer func() { opt.seed = idGen.nextID() }() + copyExpr := opt.fac.CopyExpr(a.Expr()) + copyInfo := ast.CopySourceInfo(a.SourceInfo()) + normalizeIDs(idGen.renumberStable, copyExpr, copyInfo) + return copyExpr, copyInfo +} + +// CopyASTAndMetadata copies the input AST and propagates the macro metadata into the AST being +// optimized. +func (opt *optimizerExprFactory) CopyASTAndMetadata(a *ast.AST) ast.Expr { + copyExpr, copyInfo := opt.CopyAST(a) + for macroID, call := range copyInfo.MacroCalls() { + opt.SetMacroCall(macroID, call) + } + return copyExpr +} + +// ClearMacroCall clears the macro at the given expression id. +func (opt *optimizerExprFactory) ClearMacroCall(id int64) { + opt.sourceInfo.ClearMacroCall(id) +} + +// SetMacroCall sets the macro call metadata for the given macro id within the tracked source info +// metadata. +func (opt *optimizerExprFactory) SetMacroCall(id int64, expr ast.Expr) { + opt.sourceInfo.SetMacroCall(id, expr) +} + +// MacroCalls returns the map of macro calls currently in the context. +func (opt *optimizerExprFactory) MacroCalls() map[int64]ast.Expr { + return opt.sourceInfo.MacroCalls() +} + +// NewBindMacro creates an AST expression representing the expanded bind() macro, and a macro expression +// representing the unexpanded call signature to be inserted into the source info macro call metadata. +func (opt *optimizerExprFactory) NewBindMacro(macroID int64, varName string, varInit, remaining ast.Expr) (astExpr, macroExpr ast.Expr) { + varID := opt.nextID() + remainingID := opt.nextID() + remaining = opt.fac.CopyExpr(remaining) + remaining.RenumberIDs(func(id int64) int64 { + if id == macroID { + return remainingID + } + return id + }) + if call, exists := opt.sourceInfo.GetMacroCall(macroID); exists { + opt.SetMacroCall(remainingID, opt.fac.CopyExpr(call)) + } + + astExpr = opt.fac.NewComprehension(macroID, + opt.fac.NewList(opt.nextID(), []ast.Expr{}, []int32{}), + "#unused", + varName, + opt.fac.CopyExpr(varInit), + opt.fac.NewLiteral(opt.nextID(), types.False), + opt.fac.NewIdent(varID, varName), + remaining) + + macroExpr = opt.fac.NewMemberCall(0, "bind", + opt.fac.NewIdent(opt.nextID(), "cel"), + opt.fac.NewIdent(varID, varName), + opt.fac.CopyExpr(varInit), + opt.fac.CopyExpr(remaining)) + opt.sanitizeMacro(macroID, macroExpr) + return +} + +// NewCall creates a global function call invocation expression. +// +// Example: +// +// countByField(list, fieldName) +// - function: countByField +// - args: [list, fieldName] +func (opt *optimizerExprFactory) NewCall(function string, args ...ast.Expr) ast.Expr { + return opt.fac.NewCall(opt.nextID(), function, args...) +} + +// NewMemberCall creates a member function call invocation expression where 'target' is the receiver of the call. +// +// Example: +// +// list.countByField(fieldName) +// - function: countByField +// - target: list +// - args: [fieldName] +func (opt *optimizerExprFactory) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return opt.fac.NewMemberCall(opt.nextID(), function, target, args...) +} + +// NewIdent creates a new identifier expression. +// +// Examples: +// +// - simple_var_name +// - qualified.subpackage.var_name +func (opt *optimizerExprFactory) NewIdent(name string) ast.Expr { + return opt.fac.NewIdent(opt.nextID(), name) +} + +// NewLiteral creates a new literal expression value. 
+//
+// The range of valid values for a literal generated during optimization is different than for expressions
+// generated via parsing / type-checking, as the ref.Val may be _any_ CEL value so long as the value can
+// be converted back to a literal-like form.
+func (opt *optimizerExprFactory) NewLiteral(value ref.Val) ast.Expr {
+	return opt.fac.NewLiteral(opt.nextID(), value)
+}
+
+// NewList creates a list expression with a set of optional indices.
+//
+// Examples:
+//
+//	[a, b]
+//	- elems: [a, b]
+//	- optIndices: []
+//
+//	[a, ?b, ?c]
+//	- elems: [a, b, c]
+//	- optIndices: [1, 2]
+func (opt *optimizerExprFactory) NewList(elems []ast.Expr, optIndices []int32) ast.Expr {
+	return opt.fac.NewList(opt.nextID(), elems, optIndices)
+}
+
+// NewMap creates a map from a set of entry expressions which contain a key and value expression.
+func (opt *optimizerExprFactory) NewMap(entries []ast.EntryExpr) ast.Expr {
+	return opt.fac.NewMap(opt.nextID(), entries)
+}
+
+// NewMapEntry creates a map entry with a key and value expression and a flag to indicate whether the
+// entry is optional.
+//
+// Examples:
+//
+//	{a: b}
+//	- key: a
+//	- value: b
+//	- optional: false
+//
+//	{?a: ?b}
+//	- key: a
+//	- value: b
+//	- optional: true
+func (opt *optimizerExprFactory) NewMapEntry(key, value ast.Expr, isOptional bool) ast.EntryExpr {
+	return opt.fac.NewMapEntry(opt.nextID(), key, value, isOptional)
+}
+
+// NewHasMacro generates a test-only select expression to be included within an AST and an unexpanded
+// has() macro call signature to be inserted into the source info macro call metadata.
+func (opt *optimizerExprFactory) NewHasMacro(macroID int64, s ast.Expr) (astExpr, macroExpr ast.Expr) {
+	sel := s.AsSelect()
+	astExpr = opt.fac.NewPresenceTest(macroID, sel.Operand(), sel.FieldName())
+	macroExpr = opt.fac.NewCall(0, "has",
+		opt.NewSelect(opt.fac.CopyExpr(sel.Operand()), sel.FieldName()))
+	opt.sanitizeMacro(macroID, macroExpr)
+	return
+}
+
+// NewSelect creates a select expression where a field value is selected from an operand.
+//
+// Example:
+//
+//	msg.field_name
+//	- operand: msg
+//	- field: field_name
+func (opt *optimizerExprFactory) NewSelect(operand ast.Expr, field string) ast.Expr {
+	return opt.fac.NewSelect(opt.nextID(), operand, field)
+}
+
+// NewStruct creates a new typed struct value with a set of field initializations.
+//
+// Example:
+//
+//	pkg.TypeName{field: value}
+//	- typeName: pkg.TypeName
+//	- fields: [{field: value}]
+func (opt *optimizerExprFactory) NewStruct(typeName string, fields []ast.EntryExpr) ast.Expr {
+	return opt.fac.NewStruct(opt.nextID(), typeName, fields)
+}
+
+// NewStructField creates a struct field initialization.
+//
+// Examples:
+//
+//	{count: 3u}
+//	- field: count
+//	- value: 3u
+//	- optional: false
+//
+//	{?count: x}
+//	- field: count
+//	- value: x
+//	- optional: true
+func (opt *optimizerExprFactory) NewStructField(field string, value ast.Expr, isOptional bool) ast.EntryExpr {
+	return opt.fac.NewStructField(opt.nextID(), field, value, isOptional)
+}
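+
+// A small sketch (the pass and function names are assumptions, not part of the
+// vendored file) of an ASTOptimizer using this factory: every call to a
+// hypothetical function "legacy" is rewritten into a call to "modern", with
+// UpdateExpr keeping macro metadata and expression ids consistent.
+type exampleRenamePass struct{}
+
+func (exampleRenamePass) Optimize(ctx *OptimizerContext, a *ast.AST) *ast.AST {
+	root := ast.NavigateAST(a)
+	for _, call := range ast.MatchDescendants(root, ast.FunctionMatcher("legacy")) {
+		c := call.AsCall()
+		ctx.UpdateExpr(call, ctx.NewCall("modern", c.Args()...))
+	}
+	return a
+}
+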
+// UpdateExpr updates the target expression with the updated content while preserving macro metadata.
+//
+// There are four scenarios during the update to consider:
+//  1. target is not macro, updated is not macro
+//  2. target is macro, updated is not macro
+//  3. target is macro, updated is macro
+//  4. target is not macro, updated is macro
+//
+// When the target is a macro already, it may either be updated to a new macro function
+// body if the update is also a macro, or it may be removed altogether if the update is
+// not a macro.
+//
+// When the update is a macro, then the target references within other macros must be
+// updated to point to the new updated macro. Otherwise, other macros which pointed to
+// the target body must be replaced with copies of the updated expression body.
+func (opt *optimizerExprFactory) UpdateExpr(target, updated ast.Expr) {
+	// Update the expression
+	target.SetKindCase(updated)
+
+	// Early return if there are no macros present, as the source info reflects the
+	// macro set from the target and updated expressions.
+	if len(opt.sourceInfo.MacroCalls()) == 0 {
+		return
+	}
+	// Determine whether the target expression was a macro.
+	_, targetIsMacro := opt.sourceInfo.GetMacroCall(target.ID())
+
+	// Determine whether the updated expression was a macro.
+	updatedMacro, updatedIsMacro := opt.sourceInfo.GetMacroCall(updated.ID())
+
+	if updatedIsMacro {
+		// If the updated call was a macro, then updated id maps to target id,
+		// and the updated macro moves into the target id slot.
+		opt.sourceInfo.ClearMacroCall(updated.ID())
+		opt.sourceInfo.SetMacroCall(target.ID(), updatedMacro)
+	} else if targetIsMacro {
+		// Otherwise if the target expr was a macro, but is no longer, clear
+		// the macro reference.
+		opt.sourceInfo.ClearMacroCall(target.ID())
+	}
+
+	// Punch holes in the updated value where macro references exist.
+	macroExpr := opt.fac.CopyExpr(target)
+	macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) {
+		if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists {
+			e.SetKindCase(nil)
+		}
+	})
+	ast.PostOrderVisit(macroExpr, macroRefVisitor)
+
+	// Update any references to the expression within a macro
+	macroVisitor := ast.NewExprVisitor(func(call ast.Expr) {
+		// Update the target expression to point to the macro expression which
+		// will be empty if the updated expression was a macro.
+		if call.ID() == target.ID() {
+			call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+		}
+		// Update the macro call expression if it refers to the updated expression
+		// id which has since been remapped to the target id.
+		if call.ID() == updated.ID() {
+			// Either ensure the expression is a macro reference or is populated with
+			// the relevant sub-expression if the updated expr was not a macro.
+			if updatedIsMacro {
+				call.SetKindCase(nil)
+			} else {
+				call.SetKindCase(opt.fac.CopyExpr(macroExpr))
+			}
+			// Since SetKindCase does not renumber the id, ensure the references to
+			// the old 'updated' id are mapped to the target id.
+ call.RenumberIDs(func(id int64) int64 { + if id == updated.ID() { + return target.ID() + } + return id + }) + } + }) + for _, call := range opt.sourceInfo.MacroCalls() { + ast.PostOrderVisit(call, macroVisitor) + } +} + +func (opt *optimizerExprFactory) sanitizeMacro(macroID int64, macroExpr ast.Expr) { + macroRefVisitor := ast.NewExprVisitor(func(e ast.Expr) { + if _, exists := opt.sourceInfo.GetMacroCall(e.ID()); exists && e.ID() != macroID { + e.SetKindCase(nil) + } + }) + ast.PostOrderVisit(macroExpr, macroRefVisitor) +} diff --git a/vendor/github.com/google/cel-go/cel/options.go b/vendor/github.com/google/cel-go/cel/options.go new file mode 100644 index 00000000..69c69426 --- /dev/null +++ b/vendor/github.com/google/cel-go/cel/options.go @@ -0,0 +1,667 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cel + +import ( + "fmt" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/dynamicpb" + + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/interpreter" + "github.com/google/cel-go/parser" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + descpb "google.golang.org/protobuf/types/descriptorpb" +) + +// These constants beginning with "Feature" enable optional behavior in +// the library. See the documentation for each constant to see its +// effects, compatibility restrictions, and standard conformance. +const ( + _ = iota + + // Enable the tracking of function call expressions replaced by macros. + featureEnableMacroCallTracking + + // Enable the use of cross-type numeric comparisons at the type-checker. + featureCrossTypeNumericComparisons + + // Enable eager validation of declarations to ensure that Env values created + // with `Extend` inherit a validated list of declarations from the parent Env. + featureEagerlyValidateDeclarations + + // Enable the use of the default UTC timezone when a timezone is not specified + // on a CEL timestamp operation. This fixes the scenario where the input time + // is not already in UTC. + featureDefaultUTCTimeZone + + // Enable the serialization of logical operator ASTs as variadic calls, thus + // compressing the logic graph to a single call when multiple like-operator + // expressions occur: e.g. a && b && c && d -> call(_&&_, [a, b, c, d]) + featureVariadicLogicalASTs + + // Enable error generation when a presence test or optional field selection is + // performed on a primitive type. + featureEnableErrorOnBadPresenceTest +) + +// EnvOption is a functional interface for configuring the environment. 
+type EnvOption func(e *Env) (*Env, error)
+
+// The ClearMacros option clears all parser macros.
+//
+// Clearing macros will ensure CEL expressions can only contain linear evaluation paths, as
+// comprehensions such as `all` and `exists` are enabled only via macros.
+func ClearMacros() EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.macros = NoMacros
+		return e, nil
+	}
+}
+
+// CustomTypeAdapter swaps the default types.Adapter implementation with a custom one.
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeAdapter(adapter types.Adapter) EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.adapter = adapter
+		return e, nil
+	}
+}
+
+// CustomTypeProvider replaces the types.Provider implementation with a custom one.
+//
+// The `provider` variable type may either be types.Provider or ref.TypeProvider (deprecated).
+//
+// Note: This option must be specified before the Types and TypeDescs options when used together.
+func CustomTypeProvider(provider any) EnvOption {
+	return func(e *Env) (*Env, error) {
+		var err error
+		e.provider, err = maybeInteropProvider(provider)
+		return e, err
+	}
+}
+
+// Declarations option extends the declaration set configured in the environment.
+//
+// Note: Declarations will by default be appended to the pre-existing declaration set configured
+// for the environment. The NewEnv call builds on top of the standard CEL declarations. For a
+// purely custom set of declarations use NewCustomEnv.
+func Declarations(decls ...*exprpb.Decl) EnvOption {
+	declOpts := []EnvOption{}
+	var err error
+	var opt EnvOption
+	// Convert the declarations to `EnvOption` values ahead of time.
+	// Surface any errors in conversion when the options are applied.
+	for _, d := range decls {
+		opt, err = ExprDeclToDeclaration(d)
+		if err != nil {
+			break
+		}
+		declOpts = append(declOpts, opt)
+	}
+	return func(e *Env) (*Env, error) {
+		if err != nil {
+			return nil, err
+		}
+		for _, o := range declOpts {
+			e, err = o(e)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return e, nil
+	}
+}
+
+// EagerlyValidateDeclarations ensures that any collisions between configured declarations are caught
+// at the time of the `NewEnv` call.
+//
+// Eagerly validating declarations is also useful for bootstrapping a base `cel.Env` value.
+// Calls to base `Env.Extend()` will be significantly faster when declarations are eagerly validated
+// as declarations will be collision-checked at most once and only incrementally by way of `Extend`.
+//
+// Disabled by default as not all environments are used for type-checking.
+func EagerlyValidateDeclarations(enabled bool) EnvOption {
+	return features(featureEagerlyValidateDeclarations, enabled)
+}
+
+// HomogeneousAggregateLiterals disables mixed type list and map literal values.
+//
+// Note, it is still possible to have heterogeneous aggregates when provided as variables to the
+// expression, as well as via conversion of well-known dynamic types, or with unchecked
+// expressions.
+func HomogeneousAggregateLiterals() EnvOption {
+	return ASTValidators(ValidateHomogeneousAggregateLiterals())
+}
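+
+// A short sketch (hypothetical, not part of the vendored file) composing the
+// options above into a stricter environment: declarations are validated
+// eagerly, aggregate literals must be homogeneous, and the macro set is
+// trimmed down to has() and filter.
+func exampleStrictEnv() (*Env, error) {
+	return NewEnv(
+		EagerlyValidateDeclarations(true),
+		HomogeneousAggregateLiterals(),
+		ClearMacros(),
+		Macros(HasMacro, FilterMacro),
+	)
+}
+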
+// variadicLogicalOperatorASTs flattens like-operator chained logical expressions into a single
+// variadic call with N-terms. This behavior is useful when serializing to a protocol buffer as
+// it will reduce the number of recursive calls needed to deserialize the AST later.
+//
+// For example, given the following expression the call graph will be rendered accordingly:
+//
+//	expression: a && b && c && (d || e)
+//	ast: call(_&&_, [a, b, c, call(_||_, [d, e])])
+func variadicLogicalOperatorASTs() EnvOption {
+	return features(featureVariadicLogicalASTs, true)
+}
+
+// Macros option extends the macro set configured in the environment.
+//
+// Note: This option must be specified after ClearMacros if used together.
+func Macros(macros ...Macro) EnvOption {
+	return func(e *Env) (*Env, error) {
+		e.macros = append(e.macros, macros...)
+		return e, nil
+	}
+}
+
+// Container sets the container for resolving variable names. Defaults to an empty container.
+//
+// If all references within an expression are relative to a protocol buffer package, then
+// specifying a container of `google.type` would make it possible to write expressions such as
+// `Expr{expression: 'a < b'}` instead of having to write `google.type.Expr{...}`.
+func Container(name string) EnvOption {
+	return func(e *Env) (*Env, error) {
+		cont, err := e.Container.Extend(containers.Name(name))
+		if err != nil {
+			return nil, err
+		}
+		e.Container = cont
+		return e, nil
+	}
+}
+
+// Abbrevs configures a set of simple names as abbreviations for fully-qualified names.
+//
+// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name.
+// Abbreviations can be useful when working with variables, functions, and especially types from
+// multiple namespaces:
+//
+//	// CEL object construction
+//	qual.pkg.version.ObjTypeName{
+//	    field: alt.container.ver.FieldTypeName{value: ...}
+//	}
++//
+// Only one of the qualified names above may be used as the CEL container, so at least one of these
+// references must be a long qualified name within an otherwise short CEL program. Using the
+// following abbreviations, the program becomes much simpler:
+//
+//	// CEL Go option
+//	Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName")
+//	// Simplified Object construction
+//	ObjTypeName{field: FieldTypeName{value: ...}}
+//
+// There are a few rules for the qualified names and the simple abbreviations generated from them:
+// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`.
+// - The last element in the qualified name is the abbreviation.
+// - Abbreviations must not collide with each other.
+// - The abbreviation must not collide with unqualified names in use.
+//
+// Abbreviations are distinct from container-based references in the following important ways:
+// - Abbreviations must expand to a fully-qualified name.
+// - Expanded abbreviations do not participate in namespace resolution.
+// - Abbreviation expansion is done instead of the container search for a matching identifier.
+// - Containers follow C++ namespace resolution rules with searches from the most qualified name
+//   to the least qualified name.
+// - Container references within the CEL program may be relative, and are resolved to fully
+//   qualified names at either type-check time or program plan time, whichever comes first.
+//
+// If there is ever a case where an identifier could be in both the container and as an
+// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is
+// preserved between compilations even as the container evolves.
+func Abbrevs(qualifiedNames ...string) EnvOption { + return func(e *Env) (*Env, error) { + cont, err := e.Container.Extend(containers.Abbrevs(qualifiedNames...)) + if err != nil { + return nil, err + } + e.Container = cont + return e, nil + } +} + +// customTypeRegistry is an internal-only interface containing the minimum methods required to support +// custom types. It is a subset of methods from ref.TypeRegistry. +type customTypeRegistry interface { + RegisterDescriptor(protoreflect.FileDescriptor) error + RegisterType(...ref.Type) error +} + +// Types adds one or more type declarations to the environment, allowing for construction of +// type-literals whose definitions are included in the common expression built-in set. +// +// The input types may either be instances of `proto.Message` or `ref.Type`. Any other type +// provided to this option will result in an error. +// +// Well-known protobuf types within the `google.protobuf.*` package are included in the standard +// environment by default. +// +// Note: This option must be specified after the CustomTypeProvider option when used together. +func Types(addTypes ...any) EnvOption { + return func(e *Env) (*Env, error) { + reg, isReg := e.provider.(customTypeRegistry) + if !isReg { + return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) + } + for _, t := range addTypes { + switch v := t.(type) { + case proto.Message: + fdMap := pb.CollectFileDescriptorSet(v) + for _, fd := range fdMap { + err := reg.RegisterDescriptor(fd) + if err != nil { + return nil, err + } + } + case ref.Type: + err := reg.RegisterType(v) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported type: %T", t) + } + } + return e, nil + } +} + +// TypeDescs adds type declarations from any protoreflect.FileDescriptor, protoregistry.Files, +// google.protobuf.FileDescriptorProto or google.protobuf.FileDescriptorSet provided. +// +// Note that messages instantiated from these descriptors will be *dynamicpb.Message values +// rather than the concrete message type. +// +// TypeDescs are hermetic to a single Env object, but may be copied to other Env values via +// extension or by re-using the same EnvOption with another NewEnv() call. +func TypeDescs(descs ...any) EnvOption { + return func(e *Env) (*Env, error) { + reg, isReg := e.provider.(customTypeRegistry) + if !isReg { + return nil, fmt.Errorf("custom types not supported by provider: %T", e.provider) + } + // Scan the input descriptors for FileDescriptorProto messages and accumulate them into a + // synthetic FileDescriptorSet as the FileDescriptorProto messages may refer to each other + // and will not resolve properly unless they are part of the same set. 
+ var fds *descpb.FileDescriptorSet + for _, d := range descs { + switch f := d.(type) { + case *descpb.FileDescriptorProto: + if fds == nil { + fds = &descpb.FileDescriptorSet{ + File: []*descpb.FileDescriptorProto{}, + } + } + fds.File = append(fds.File, f) + } + } + if fds != nil { + if err := registerFileSet(reg, fds); err != nil { + return nil, err + } + } + for _, d := range descs { + switch f := d.(type) { + case *protoregistry.Files: + if err := registerFiles(reg, f); err != nil { + return nil, err + } + case protoreflect.FileDescriptor: + if err := reg.RegisterDescriptor(f); err != nil { + return nil, err + } + case *descpb.FileDescriptorSet: + if err := registerFileSet(reg, f); err != nil { + return nil, err + } + case *descpb.FileDescriptorProto: + // skip, handled as a synthetic file descriptor set. + default: + return nil, fmt.Errorf("unsupported type descriptor: %T", d) + } + } + return e, nil + } +} + +func registerFileSet(reg customTypeRegistry, fileSet *descpb.FileDescriptorSet) error { + files, err := protodesc.NewFiles(fileSet) + if err != nil { + return fmt.Errorf("protodesc.NewFiles(%v) failed: %v", fileSet, err) + } + return registerFiles(reg, files) +} + +func registerFiles(reg customTypeRegistry, files *protoregistry.Files) error { + var err error + files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { + err = reg.RegisterDescriptor(fd) + return err == nil + }) + return err +} + +// ProgramOption is a functional interface for configuring evaluation bindings and behaviors. +type ProgramOption func(p *prog) (*prog, error) + +// CustomDecorator appends an InterpreterDecorator to the program. +// +// InterpretableDecorators can be used to inspect, alter, or replace the Program plan. +func CustomDecorator(dec interpreter.InterpretableDecorator) ProgramOption { + return func(p *prog) (*prog, error) { + p.decorators = append(p.decorators, dec) + return p, nil + } +} + +// Functions adds function overloads that extend or override the set of CEL built-ins. +// +// Deprecated: use Function() instead to declare the function, its overload signatures, +// and the overload implementations. +func Functions(funcs ...*functions.Overload) ProgramOption { + return func(p *prog) (*prog, error) { + if err := p.dispatcher.Add(funcs...); err != nil { + return nil, err + } + return p, nil + } +} + +// Globals sets the global variable values for a given program. These values may be shadowed by +// variables with the same name provided to the Eval() call. If Globals is used in a Library with +// a Lib EnvOption, vars may shadow variables provided by previously added libraries. +// +// The vars value may either be an `interpreter.Activation` instance or a `map[string]any`. +func Globals(vars any) ProgramOption { + return func(p *prog) (*prog, error) { + defaultVars, err := interpreter.NewActivation(vars) + if err != nil { + return nil, err + } + if p.defaultVars != nil { + defaultVars = interpreter.NewHierarchicalActivation(p.defaultVars, defaultVars) + } + p.defaultVars = defaultVars + return p, nil + } +} + +// OptimizeRegex provides a way to replace the InterpretableCall for regex functions. This can be used +// to compile regex string constants at program creation time and report any errors and then use the +// compiled regex for all regex function invocations. +func OptimizeRegex(regexOptimizations ...*interpreter.RegexOptimization) ProgramOption { + return func(p *prog) (*prog, error) { + p.regexOptimizations = append(p.regexOptimizations, regexOptimizations...) 
+ return p, nil
+ }
+}
+
+// EvalOption indicates an evaluation option that may affect the evaluation behavior or information
+// in the output result.
+type EvalOption int
+
+const (
+ // OptTrackState will cause the runtime to return an immutable EvalState value in the Result.
+ OptTrackState EvalOption = 1 << iota
+
+ // OptExhaustiveEval causes the runtime to disable short-circuits and track state.
+ OptExhaustiveEval EvalOption = 1<<iota | OptTrackState
+
+ // OptOptimize precomputes functions and operators with constants as arguments at program
+ // creation time.
+ OptOptimize EvalOption = 1 << iota
+
+ // OptTrackCost enables runtime cost tracking; the accumulated cost is available via the
+ // ActualCost() method on EvalDetails.
+ OptTrackCost EvalOption = 1 << iota
+)
+
+ // Enable interrupt checking if there's a non-zero check frequency.
+ if p.interruptCheckFrequency > 0 {
+ decorators = append(decorators, interpreter.InterruptableEval())
+ }
+ // Enable constant folding first.
+ if p.evalOpts&OptOptimize == OptOptimize {
+ decorators = append(decorators, interpreter.Optimize())
+ p.regexOptimizations = append(p.regexOptimizations, interpreter.MatchesRegexOptimization)
+ }
+ // Enable regex compilation of constants immediately after folding constants.
+ if len(p.regexOptimizations) > 0 {
+ decorators = append(decorators, interpreter.CompileRegexConstants(p.regexOptimizations...))
+ }
+
+ // Enable exhaustive eval, state tracking and cost tracking last since they require a factory.
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState|OptTrackCost) != 0 {
+ factory := func(state interpreter.EvalState, costTracker *interpreter.CostTracker) (Program, error) {
+ costTracker.Estimator = p.callCostEstimator
+ costTracker.Limit = p.costLimit
+ for _, costOpt := range p.costOptions {
+ err := costOpt(costTracker)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // Limit capacity to guarantee a reallocation when calling 'append(decs, ...)' below. This
+ // prevents the underlying memory from being shared between factory function calls causing
+ // undesired mutations.
+ decs := decorators[:len(decorators):len(decorators)]
+ var observers []interpreter.EvalObserver
+
+ if p.evalOpts&(OptExhaustiveEval|OptTrackState) != 0 {
+ // EvalStateObserver is required for OptExhaustiveEval.
+ observers = append(observers, interpreter.EvalStateObserver(state))
+ }
+ if p.evalOpts&OptTrackCost == OptTrackCost {
+ observers = append(observers, interpreter.CostObserver(costTracker))
+ }
+
+ // Enable exhaustive eval over a basic observer since it offers a superset of features.
+ if p.evalOpts&OptExhaustiveEval == OptExhaustiveEval {
+ decs = append(decs, interpreter.ExhaustiveEval(), interpreter.Observe(observers...))
+ } else if len(observers) > 0 {
+ decs = append(decs, interpreter.Observe(observers...))
+ }
+
+ return p.clone().initInterpretable(a, decs)
+ }
+ return newProgGen(factory)
+ }
+ return p.initInterpretable(a, decorators)
+}
+
+func (p *prog) initInterpretable(a *Ast, decs []interpreter.InterpretableDecorator) (*prog, error) {
+ // When the AST has been type-checked it contains metadata that can be used to speed up program execution.
+ interpretable, err := p.interpreter.NewInterpretable(a.impl, decs...)
+ if err != nil {
+ return nil, err
+ }
+ p.interpretable = interpretable
+ return p, nil
+}
+
+// Eval implements the Program interface method.
+func (p *prog) Eval(input any) (v ref.Val, det *EvalDetails, err error) {
+ // Configure error recovery for unexpected panics during evaluation. Note, the use of named
+ // return values makes it possible to modify the error response during the recovery
+ // function.
+ defer func() {
+ if r := recover(); r != nil {
+ switch t := r.(type) {
+ case interpreter.EvalCancelledError:
+ err = t
+ default:
+ err = fmt.Errorf("internal error: %v", r)
+ }
+ }
+ }()
+ // Build a hierarchical activation if there are default vars set.
+ var vars interpreter.Activation + switch v := input.(type) { + case interpreter.Activation: + vars = v + case map[string]any: + vars = activationPool.Setup(v) + defer activationPool.Put(vars) + default: + return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input) + } + if p.defaultVars != nil { + vars = interpreter.NewHierarchicalActivation(p.defaultVars, vars) + } + v = p.interpretable.Eval(vars) + // The output of an internal Eval may have a value (`v`) that is a types.Err. This step + // translates the CEL value to a Go error response. This interface does not quite match the + // RPC signature which allows for multiple errors to be returned, but should be sufficient. + if types.IsError(v) { + err = v.(*types.Err) + } + return +} + +// ContextEval implements the Program interface. +func (p *prog) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("context can not be nil") + } + // Configure the input, making sure to wrap Activation inputs in the special ctxActivation which + // exposes the #interrupted variable and manages rate-limited checks of the ctx.Done() state. + var vars interpreter.Activation + switch v := input.(type) { + case interpreter.Activation: + vars = ctxActivationPool.Setup(v, ctx.Done(), p.interruptCheckFrequency) + defer ctxActivationPool.Put(vars) + case map[string]any: + rawVars := activationPool.Setup(v) + defer activationPool.Put(rawVars) + vars = ctxActivationPool.Setup(rawVars, ctx.Done(), p.interruptCheckFrequency) + defer ctxActivationPool.Put(vars) + default: + return nil, nil, fmt.Errorf("invalid input, wanted Activation or map[string]any, got: (%T)%v", input, input) + } + return p.Eval(vars) +} + +// progFactory is a helper alias for marking a program creation factory function. +type progFactory func(interpreter.EvalState, *interpreter.CostTracker) (Program, error) + +// progGen holds a reference to a progFactory instance and implements the Program interface. +type progGen struct { + factory progFactory +} + +// newProgGen tests the factory object by calling it once and returns a factory-based Program if +// the test is successful. +func newProgGen(factory progFactory) (Program, error) { + // Test the factory to make sure that configuration errors are spotted at config + tracker, err := interpreter.NewCostTracker(nil) + if err != nil { + return nil, err + } + _, err = factory(interpreter.NewEvalState(), tracker) + if err != nil { + return nil, err + } + return &progGen{factory: factory}, nil +} + +// Eval implements the Program interface method. +func (gen *progGen) Eval(input any) (ref.Val, *EvalDetails, error) { + // The factory based Eval() differs from the standard evaluation model in that it generates a + // new EvalState instance for each call to ensure that unique evaluations yield unique stateful + // results. + state := interpreter.NewEvalState() + costTracker, err := interpreter.NewCostTracker(nil) + if err != nil { + return nil, nil, err + } + det := &EvalDetails{state: state, costTracker: costTracker} + + // Generate a new instance of the interpretable using the factory configured during the call to + // newProgram(). It is incredibly unlikely that the factory call will generate an error given + // the factory test performed within the Program() call. + p, err := gen.factory(state, costTracker) + if err != nil { + return nil, det, err + } + + // Evaluate the input, returning the result and the 'state' within EvalDetails. 
+ v, _, err := p.Eval(input) + if err != nil { + return v, det, err + } + return v, det, nil +} + +// ContextEval implements the Program interface method. +func (gen *progGen) ContextEval(ctx context.Context, input any) (ref.Val, *EvalDetails, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("context can not be nil") + } + // The factory based Eval() differs from the standard evaluation model in that it generates a + // new EvalState instance for each call to ensure that unique evaluations yield unique stateful + // results. + state := interpreter.NewEvalState() + costTracker, err := interpreter.NewCostTracker(nil) + if err != nil { + return nil, nil, err + } + det := &EvalDetails{state: state, costTracker: costTracker} + + // Generate a new instance of the interpretable using the factory configured during the call to + // newProgram(). It is incredibly unlikely that the factory call will generate an error given + // the factory test performed within the Program() call. + p, err := gen.factory(state, costTracker) + if err != nil { + return nil, det, err + } + + // Evaluate the input, returning the result and the 'state' within EvalDetails. + v, _, err := p.ContextEval(ctx, input) + if err != nil { + return v, det, err + } + return v, det, nil +} + +type ctxEvalActivation struct { + parent interpreter.Activation + interrupt <-chan struct{} + interruptCheckCount uint + interruptCheckFrequency uint +} + +// ResolveName implements the Activation interface method, but adds a special #interrupted variable +// which is capable of testing whether a 'done' signal is provided from a context.Context channel. +func (a *ctxEvalActivation) ResolveName(name string) (any, bool) { + if name == "#interrupted" { + a.interruptCheckCount++ + if a.interruptCheckCount%a.interruptCheckFrequency == 0 { + select { + case <-a.interrupt: + return true, true + default: + return nil, false + } + } + return nil, false + } + return a.parent.ResolveName(name) +} + +func (a *ctxEvalActivation) Parent() interpreter.Activation { + return a.parent +} + +func newCtxEvalActivationPool() *ctxEvalActivationPool { + return &ctxEvalActivationPool{ + Pool: sync.Pool{ + New: func() any { + return &ctxEvalActivation{} + }, + }, + } +} + +type ctxEvalActivationPool struct { + sync.Pool +} + +// Setup initializes a pooled Activation with the ability check for context.Context cancellation +func (p *ctxEvalActivationPool) Setup(vars interpreter.Activation, done <-chan struct{}, interruptCheckRate uint) *ctxEvalActivation { + a := p.Pool.Get().(*ctxEvalActivation) + a.parent = vars + a.interrupt = done + a.interruptCheckCount = 0 + a.interruptCheckFrequency = interruptCheckRate + return a +} + +type evalActivation struct { + vars map[string]any + lazyVars map[string]any +} + +// ResolveName looks up the value of the input variable name, if found. +// +// Lazy bindings may be supplied within the map-based input in either of the following forms: +// - func() any +// - func() ref.Val +// +// The lazy binding will only be invoked once per evaluation. +// +// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using +// the types.Adapter configured in the environment. 
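The lazy-binding forms described above can be exercised directly through map-based inputs. Below is a minimal sketch; the `token` variable and its loader function are illustrative only. The `func() any` binding is invoked at most once per evaluation, even when the variable is referenced multiple times:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("token", cel.StringType))
	if err != nil {
		log.Fatal(err)
	}
	ast, iss := env.Compile(`token.startsWith("tok_") && token.size() > 4`)
	if iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		log.Fatal(err)
	}
	// 'token' appears twice in the expression, but the lazy binding below is
	// resolved once and cached for the remainder of the evaluation.
	out, _, err := prg.Eval(map[string]any{
		"token": func() any {
			fmt.Println("loading token") // printed a single time
			return "tok_12345"
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // true
}
```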
+func (a *evalActivation) ResolveName(name string) (any, bool) {
+ v, found := a.vars[name]
+ if !found {
+ return nil, false
+ }
+ switch obj := v.(type) {
+ case func() ref.Val:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ case func() any:
+ if resolved, found := a.lazyVars[name]; found {
+ return resolved, true
+ }
+ lazy := obj()
+ a.lazyVars[name] = lazy
+ return lazy, true
+ default:
+ return obj, true
+ }
+}
+
+// Parent implements the interpreter.Activation interface.
+func (a *evalActivation) Parent() interpreter.Activation {
+ return nil
+}
+
+func newEvalActivationPool() *evalActivationPool {
+ return &evalActivationPool{
+ Pool: sync.Pool{
+ New: func() any {
+ return &evalActivation{lazyVars: make(map[string]any)}
+ },
+ },
+ }
+}
+
+type evalActivationPool struct {
+ sync.Pool
+}
+
+// Setup initializes a pooled Activation object with the map input.
+func (p *evalActivationPool) Setup(vars map[string]any) *evalActivation {
+ a := p.Pool.Get().(*evalActivation)
+ a.vars = vars
+ return a
+}
+
+func (p *evalActivationPool) Put(value any) {
+ a := value.(*evalActivation)
+ for k := range a.lazyVars {
+ delete(a.lazyVars, k)
+ }
+ p.Pool.Put(a)
+}
+
+var (
+ // activationPool is an internally managed pool of Activation values that wrap map[string]any inputs
+ activationPool = newEvalActivationPool()
+
+ // ctxActivationPool is an internally managed pool of Activation values that expose a special #interrupted variable
+ ctxActivationPool = newCtxEvalActivationPool()
+)
diff --git a/vendor/github.com/google/cel-go/cel/validator.go b/vendor/github.com/google/cel-go/cel/validator.go
new file mode 100644
index 00000000..b50c6745
--- /dev/null
+++ b/vendor/github.com/google/cel-go/cel/validator.go
@@ -0,0 +1,375 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cel
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/overloads"
+)
+
+const (
+ homogeneousValidatorName = "cel.lib.std.validate.types.homogeneous"
+
+ // HomogeneousAggregateLiteralExemptFunctions is the ValidatorConfig key used to configure
+ // the set of function names which are exempt from homogeneous type checks. The expected type
+ // is a string list of function names.
+ //
+ // As an example, the `.format([args])` call expects the input arguments list to be
+ // comprised of a variety of types which correspond to the types expected by the format control
+ // clauses; however, all other uses of a mixed element type list would be unexpected.
+ HomogeneousAggregateLiteralExemptFunctions = homogeneousValidatorName + ".exempt"
+)
+
+// ASTValidators configures a set of ASTValidator instances into the target environment.
+//
+// Validators are applied in the order in which they are specified and are treated as singletons.
+// The same ASTValidator with a given name will not be applied more than once.
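To make the validator wiring concrete, here is a short sketch that enables two of the standard validators declared later in this file; the expression is deliberately malformed and the nesting limit is an arbitrary example value:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.ASTValidators(
			cel.ValidateRegexLiterals(),
			cel.ValidateComprehensionNestingLimit(2),
		),
	)
	if err != nil {
		log.Fatal(err)
	}
	// The malformed regex constant is reported during Compile, which runs the
	// type-checker and any configured validators, rather than surfacing as a
	// runtime error on every evaluation.
	_, iss := env.Compile(`"abc".matches("(unclosed")`)
	fmt.Println(iss.Err()) // reports an invalid matches argument
}
```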
+func ASTValidators(validators ...ASTValidator) EnvOption { + return func(e *Env) (*Env, error) { + for _, v := range validators { + if !e.HasValidator(v.Name()) { + e.validators = append(e.validators, v) + } + } + return e, nil + } +} + +// ASTValidator defines a singleton interface for validating a type-checked Ast against an environment. +// +// Note: the Issues argument is mutable in the sense that it is intended to collect errors which will be +// reported to the caller. +type ASTValidator interface { + // Name returns the name of the validator. Names must be unique. + Name() string + + // Validate validates a given Ast within an Environment and collects a set of potential issues. + // + // The ValidatorConfig is generated from the set of ASTValidatorConfigurer instances prior to + // the invocation of the Validate call. The expectation is that the validator configuration + // is created in sequence and immutable once provided to the Validate call. + // + // See individual validators for more information on their configuration keys and configuration + // properties. + Validate(*Env, ValidatorConfig, *ast.AST, *Issues) +} + +// ValidatorConfig provides an accessor method for querying validator configuration state. +type ValidatorConfig interface { + GetOrDefault(name string, value any) any +} + +// MutableValidatorConfig provides mutation methods for querying and updating validator configuration +// settings. +type MutableValidatorConfig interface { + ValidatorConfig + Set(name string, value any) error +} + +// ASTValidatorConfigurer indicates that this object, currently expected to be an ASTValidator, +// participates in validator configuration settings. +// +// This interface may be split from the expectation of being an ASTValidator instance in the future. +type ASTValidatorConfigurer interface { + Configure(MutableValidatorConfig) error +} + +// validatorConfig implements the ValidatorConfig and MutableValidatorConfig interfaces. +type validatorConfig struct { + data map[string]any +} + +// newValidatorConfig initializes the validator config with default values for core CEL validators. +func newValidatorConfig() *validatorConfig { + return &validatorConfig{ + data: map[string]any{ + HomogeneousAggregateLiteralExemptFunctions: []string{}, + }, + } +} + +// GetOrDefault returns the configured value for the name, if present, else the input default value. +// +// Note, the type-agreement between the input default and configured value is not checked on read. +func (config *validatorConfig) GetOrDefault(name string, value any) any { + v, found := config.data[name] + if !found { + return value + } + return v +} + +// Set configures a validator option with the given name and value. +// +// If the value had previously been set, the new value must have the same reflection type as the old one, +// or the call will error. +func (config *validatorConfig) Set(name string, value any) error { + v, found := config.data[name] + if found && reflect.TypeOf(v) != reflect.TypeOf(value) { + return fmt.Errorf("incompatible configuration type for %s, got %T, wanted %T", name, value, v) + } + config.data[name] = value + return nil +} + +// ExtendedValidations collects a set of common AST validations which reduce the likelihood of runtime errors. 
+//
+// - Validate duration and timestamp literals
+// - Ensure regex strings are valid
+// - Disable mixed type list and map literals
+func ExtendedValidations() EnvOption {
+ return ASTValidators(
+ ValidateDurationLiterals(),
+ ValidateTimestampLiterals(),
+ ValidateRegexLiterals(),
+ ValidateHomogeneousAggregateLiterals(),
+ )
+}
+
+// ValidateDurationLiterals ensures that duration literal arguments are valid immediately after type-check.
+func ValidateDurationLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertDuration, 0, evalCall)
+}
+
+// ValidateTimestampLiterals ensures that timestamp literal arguments are valid immediately after type-check.
+func ValidateTimestampLiterals() ASTValidator {
+ return newFormatValidator(overloads.TypeConvertTimestamp, 0, evalCall)
+}
+
+// ValidateRegexLiterals ensures that regex patterns are validated after type-check.
+func ValidateRegexLiterals() ASTValidator {
+ return newFormatValidator(overloads.Matches, 0, compileRegex)
+}
+
+// ValidateHomogeneousAggregateLiterals checks that all list and map literal entries have the same types, i.e.
+// no mixed list element types or mixed map key or map value types.
+//
+// Note: the string format call relies on a mixed element type list for ease of use, so this check skips all
+// literals which occur within string format calls.
+func ValidateHomogeneousAggregateLiterals() ASTValidator {
+ return homogeneousAggregateLiteralValidator{}
+}
+
+// ValidateComprehensionNestingLimit ensures that comprehension nesting does not exceed the specified limit.
+//
+// This validator can be useful for preventing arbitrarily nested comprehensions which can take high polynomial
+// time to complete.
+//
+// Note, this limit does not apply to comprehensions with an empty iteration range, as these comprehensions have
+// no actual looping cost. The cel.bind() macro utilizes the comprehension structure to perform local variable
+// assignments and supplies an empty iteration range, so cel.bind() calls won't count against the nesting limit either.
+func ValidateComprehensionNestingLimit(limit int) ASTValidator {
+ return nestingLimitValidator{limit: limit}
+}
+
+type argChecker func(env *Env, call, arg ast.Expr) error
+
+func newFormatValidator(funcName string, argNum int, check argChecker) formatValidator {
+ return formatValidator{
+ funcName: funcName,
+ check: check,
+ argNum: argNum,
+ }
+}
+
+type formatValidator struct {
+ funcName string
+ argNum int
+ check argChecker
+}
+
+// Name returns the unique name of this function format validator.
+func (v formatValidator) Name() string {
+ return fmt.Sprintf("cel.lib.std.validate.functions.%s", v.funcName)
+}
+
+// Validate searches the AST for uses of a given function name with a constant argument and performs a check
+// on whether the argument is a valid literal value.
+func (v formatValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) + funcCalls := ast.MatchDescendants(root, ast.FunctionMatcher(v.funcName)) + for _, call := range funcCalls { + callArgs := call.AsCall().Args() + if len(callArgs) <= v.argNum { + continue + } + litArg := callArgs[v.argNum] + if litArg.Kind() != ast.LiteralKind { + continue + } + if err := v.check(e, call, litArg); err != nil { + iss.ReportErrorAtID(litArg.ID(), "invalid %s argument", v.funcName) + } + } +} + +func evalCall(env *Env, call, arg ast.Expr) error { + ast := &Ast{impl: ast.NewAST(call, ast.NewSourceInfo(nil))} + prg, err := env.Program(ast) + if err != nil { + return err + } + _, _, err = prg.Eval(NoVars()) + return err +} + +func compileRegex(_ *Env, _, arg ast.Expr) error { + pattern := arg.AsLiteral().Value().(string) + _, err := regexp.Compile(pattern) + return err +} + +type homogeneousAggregateLiteralValidator struct{} + +// Name returns the unique name of the homogeneous type validator. +func (homogeneousAggregateLiteralValidator) Name() string { + return homogeneousValidatorName +} + +// Validate validates that all lists and map literals have homogeneous types, i.e. don't contain dyn types. +// +// This validator makes an exception for list and map literals which occur at any level of nesting within +// string format calls. +func (v homogeneousAggregateLiteralValidator) Validate(_ *Env, c ValidatorConfig, a *ast.AST, iss *Issues) { + var exemptedFunctions []string + exemptedFunctions = c.GetOrDefault(HomogeneousAggregateLiteralExemptFunctions, exemptedFunctions).([]string) + root := ast.NavigateAST(a) + listExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.ListKind)) + for _, listExpr := range listExprs { + if inExemptFunction(listExpr, exemptedFunctions) { + continue + } + l := listExpr.AsList() + elements := l.Elements() + optIndices := l.OptionalIndices() + var elemType *Type + for i, e := range elements { + et := a.GetType(e.ID()) + if isOptionalIndex(i, optIndices) { + et = et.Parameters()[0] + } + if elemType == nil { + elemType = et + continue + } + if !elemType.IsEquivalentType(et) { + v.typeMismatch(iss, e.ID(), elemType, et) + break + } + } + } + mapExprs := ast.MatchDescendants(root, ast.KindMatcher(ast.MapKind)) + for _, mapExpr := range mapExprs { + if inExemptFunction(mapExpr, exemptedFunctions) { + continue + } + m := mapExpr.AsMap() + entries := m.Entries() + var keyType, valType *Type + for _, e := range entries { + mapEntry := e.AsMapEntry() + key, val := mapEntry.Key(), mapEntry.Value() + kt, vt := a.GetType(key.ID()), a.GetType(val.ID()) + if mapEntry.IsOptional() { + vt = vt.Parameters()[0] + } + if keyType == nil && valType == nil { + keyType, valType = kt, vt + continue + } + if !keyType.IsEquivalentType(kt) { + v.typeMismatch(iss, key.ID(), keyType, kt) + } + if !valType.IsEquivalentType(vt) { + v.typeMismatch(iss, val.ID(), valType, vt) + } + } + } +} + +func inExemptFunction(e ast.NavigableExpr, exemptFunctions []string) bool { + parent, found := e.Parent() + for found { + if parent.Kind() == ast.CallKind { + fnName := parent.AsCall().FunctionName() + for _, exempt := range exemptFunctions { + if exempt == fnName { + return true + } + } + } + parent, found = parent.Parent() + } + return false +} + +func isOptionalIndex(i int, optIndices []int32) bool { + for _, optInd := range optIndices { + if i == int(optInd) { + return true + } + } + return false +} + +func (homogeneousAggregateLiteralValidator) typeMismatch(iss 
*Issues, id int64, expected, actual *Type) { + iss.ReportErrorAtID(id, "expected type '%s' but found '%s'", FormatCELType(expected), FormatCELType(actual)) +} + +type nestingLimitValidator struct { + limit int +} + +func (v nestingLimitValidator) Name() string { + return "cel.lib.std.validate.comprehension_nesting_limit" +} + +func (v nestingLimitValidator) Validate(e *Env, _ ValidatorConfig, a *ast.AST, iss *Issues) { + root := ast.NavigateAST(a) + comprehensions := ast.MatchDescendants(root, ast.KindMatcher(ast.ComprehensionKind)) + if len(comprehensions) <= v.limit { + return + } + for _, comp := range comprehensions { + count := 0 + e := comp + hasParent := true + for hasParent { + // When the expression is not a comprehension, continue to the next ancestor. + if e.Kind() != ast.ComprehensionKind { + e, hasParent = e.Parent() + continue + } + // When the comprehension has an empty range, continue to the next ancestor + // as this comprehension does not have any associated cost. + iterRange := e.AsComprehension().IterRange() + if iterRange.Kind() == ast.ListKind && iterRange.AsList().Size() == 0 { + e, hasParent = e.Parent() + continue + } + // Otherwise check the nesting limit. + count++ + if count > v.limit { + iss.ReportErrorAtID(comp.ID(), "comprehension exceeds nesting limit") + break + } + e, hasParent = e.Parent() + } + } +} diff --git a/vendor/github.com/google/cel-go/checker/BUILD.bazel b/vendor/github.com/google/cel-go/checker/BUILD.bazel new file mode 100644 index 00000000..678b412a --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/BUILD.bazel @@ -0,0 +1,64 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "checker.go", + "cost.go", + "env.go", + "errors.go", + "format.go", + "mapping.go", + "options.go", + "printer.go", + "scopes.go", + "types.go", + ], + importpath = "github.com/google/cel-go/checker", + visibility = ["//visibility:public"], + deps = [ + "//checker/decls:go_default_library", + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/containers:go_default_library", + "//common/debug:go_default_library", + "//common/decls:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//common/types/pb:go_default_library", + "//common/types/ref:go_default_library", + "//parser:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/emptypb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "checker_test.go", + "cost_test.go", + "env_test.go", + "format_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//common/types:go_default_library", + "//parser:go_default_library", + "//test:go_default_library", + "//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/checker/checker.go b/vendor/github.com/google/cel-go/checker/checker.go new file mode 100644 index 00000000..57fb3ce5 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/checker.go @@ -0,0 +1,696 
@@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package checker defines functions to type-check a parsed expression
+// against a set of identifier and function declarations.
+package checker
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/google/cel-go/common"
+ "github.com/google/cel-go/common/ast"
+ "github.com/google/cel-go/common/containers"
+ "github.com/google/cel-go/common/decls"
+ "github.com/google/cel-go/common/operators"
+ "github.com/google/cel-go/common/types"
+ "github.com/google/cel-go/common/types/ref"
+)
+
+type checker struct {
+ *ast.AST
+ ast.ExprFactory
+ env *Env
+ errors *typeErrors
+ mappings *mapping
+ freeTypeVarCounter int
+}
+
+// Check performs type checking, giving a typed AST.
+//
+// The input is a parsed AST and an env which encapsulates type binding of variables,
+// declarations of built-in functions, descriptions of protocol buffers, and a registry for
+// errors.
+//
+// Returns a type-checked AST, which might not be usable if there are errors in the error
+// registry.
+func Check(parsed *ast.AST, source common.Source, env *Env) (*ast.AST, *common.Errors) {
+ errs := common.NewErrors(source)
+ typeMap := make(map[int64]*types.Type)
+ refMap := make(map[int64]*ast.ReferenceInfo)
+ c := checker{
+ AST: ast.NewCheckedAST(parsed, typeMap, refMap),
+ ExprFactory: ast.NewExprFactory(),
+ env: env,
+ errors: &typeErrors{errs: errs},
+ mappings: newMapping(),
+ freeTypeVarCounter: 0,
+ }
+ c.check(c.Expr())
+
+ // Walk over the final type map substituting any type parameters either by their bound value
+ // or by DYN.
+ for id, t := range c.TypeMap() {
+ c.SetType(id, substitute(c.mappings, t, true))
+ }
+ return c.AST, errs
+}
+
+func (c *checker) check(e ast.Expr) {
+ if e == nil {
+ return
+ }
+ switch e.Kind() {
+ case ast.LiteralKind:
+ literal := ref.Val(e.AsLiteral())
+ switch literal.Type() {
+ case types.BoolType, types.BytesType, types.DoubleType, types.IntType,
+ types.NullType, types.StringType, types.UintType:
+ c.setType(e, literal.Type().(*types.Type))
+ default:
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "literal", literal.Type().TypeName())
+ }
+ case ast.IdentKind:
+ c.checkIdent(e)
+ case ast.SelectKind:
+ c.checkSelect(e)
+ case ast.CallKind:
+ c.checkCall(e)
+ case ast.ListKind:
+ c.checkCreateList(e)
+ case ast.MapKind:
+ c.checkCreateMap(e)
+ case ast.StructKind:
+ c.checkCreateStruct(e)
+ case ast.ComprehensionKind:
+ c.checkComprehension(e)
+ default:
+ c.errors.unexpectedASTType(e.ID(), c.location(e), "unspecified", reflect.TypeOf(e).Name())
+ }
+}
+
+func (c *checker) checkIdent(e ast.Expr) {
+ identName := e.AsIdent()
+ // Check to see if the identifier is declared.
+ if ident := c.env.LookupIdent(identName); ident != nil {
+ c.setType(e, ident.Type())
+ c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value()))
+ // Overwrite the identifier with its fully qualified name.
+ e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) + return + } + + c.setType(e, types.ErrorType) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), identName) +} + +func (c *checker) checkSelect(e ast.Expr) { + sel := e.AsSelect() + // Before traversing down the tree, try to interpret as qualified name. + qname, found := containers.ToQualifiedName(e) + if found { + ident := c.env.LookupIdent(qname) + if ident != nil { + // We don't check for a TestOnly expression here since the `found` result is + // always going to be false for TestOnly expressions. + + // Rewrite the node to be a variable reference to the resolved fully-qualified + // variable name. + c.setType(e, ident.Type()) + c.setReference(e, ast.NewIdentReference(ident.Name(), ident.Value())) + e.SetKindCase(c.NewIdent(e.ID(), ident.Name())) + return + } + } + + resultType := c.checkSelectField(e, sel.Operand(), sel.FieldName(), false) + if sel.IsTestOnly() { + resultType = types.BoolType + } + c.setType(e, substitute(c.mappings, resultType, false)) +} + +func (c *checker) checkOptSelect(e ast.Expr) { + // Collect metadata related to the opt select call packaged by the parser. + call := e.AsCall() + operand := call.Args()[0] + field := call.Args()[1] + fieldName, isString := maybeUnwrapString(field) + if !isString { + c.errors.notAnOptionalFieldSelection(field.ID(), c.location(field), field) + return + } + + // Perform type-checking using the field selection logic. + resultType := c.checkSelectField(e, operand, fieldName, true) + c.setType(e, substitute(c.mappings, resultType, false)) + c.setReference(e, ast.NewFunctionReference("select_optional_field")) +} + +func (c *checker) checkSelectField(e, operand ast.Expr, field string, optional bool) *types.Type { + // Interpret as field selection, first traversing down the operand. + c.check(operand) + operandType := substitute(c.mappings, c.getType(operand), false) + + // If the target type is 'optional', unwrap it for the sake of this check. + targetType, isOpt := maybeUnwrapOptional(operandType) + + // Assume error type by default as most types do not support field selection. + resultType := types.ErrorType + switch targetType.Kind() { + case types.MapKind: + // Maps yield their value type as the selection result type. + resultType = targetType.Parameters()[1] + case types.StructKind: + // Objects yield their field type declaration as the selection result type, but only if + // the field is defined. + messageType := targetType + if fieldType, found := c.lookupFieldType(e.ID(), messageType.TypeName(), field); found { + resultType = fieldType + } + case types.TypeParamKind: + // Set the operand type to DYN to prevent assignment to a potentially incorrect type + // at a later point in type-checking. The isAssignable call will update the type + // substitutions for the type param under the covers. + c.isAssignable(types.DynType, targetType) + // Also, set the result type to DYN. + resultType = types.DynType + default: + // Dynamic / error values are treated as DYN type. Errors are handled this way as well + // in order to allow forward progress on the check. + if !isDynOrError(targetType) { + c.errors.typeDoesNotSupportFieldSelection(e.ID(), c.location(e), targetType) + } + resultType = types.DynType + } + + // If the target type was optional coming in, then the result must be optional going out. 
+ if isOpt || optional { + return types.NewOptionalType(resultType) + } + return resultType +} + +func (c *checker) checkCall(e ast.Expr) { + // Note: similar logic exists within the `interpreter/planner.go`. If making changes here + // please consider the impact on planner.go and consolidate implementations or mirror code + // as appropriate. + call := e.AsCall() + fnName := call.FunctionName() + if fnName == operators.OptSelect { + c.checkOptSelect(e) + return + } + + args := call.Args() + // Traverse arguments. + for _, arg := range args { + c.check(arg) + } + + // Regular static call with simple name. + if !call.IsMemberFunction() { + // Check for the existence of the function. + fn := c.env.LookupFunction(fnName) + if fn == nil { + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) + c.setType(e, types.ErrorType) + return + } + // Overwrite the function name with its fully qualified resolved name. + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) + // Check to see whether the overload resolves. + c.resolveOverloadOrError(e, fn, nil, args) + return + } + + // If a receiver 'target' is present, it may either be a receiver function, or a namespaced + // function, but not both. Given a.b.c() either a.b.c is a function or c is a function with + // target a.b. + // + // Check whether the target is a namespaced function name. + target := call.Target() + qualifiedPrefix, maybeQualified := containers.ToQualifiedName(target) + if maybeQualified { + maybeQualifiedName := qualifiedPrefix + "." + fnName + fn := c.env.LookupFunction(maybeQualifiedName) + if fn != nil { + // The function name is namespaced and so preserving the target operand would + // be an inaccurate representation of the desired evaluation behavior. + // Overwrite with fully-qualified resolved function name sans receiver target. + e.SetKindCase(c.NewCall(e.ID(), fn.Name(), args...)) + c.resolveOverloadOrError(e, fn, nil, args) + return + } + } + + // Regular instance call. + c.check(target) + fn := c.env.LookupFunction(fnName) + // Function found, attempt overload resolution. + if fn != nil { + c.resolveOverloadOrError(e, fn, target, args) + return + } + // Function name not declared, record error. + c.setType(e, types.ErrorType) + c.errors.undeclaredReference(e.ID(), c.location(e), c.env.container.Name(), fnName) +} + +func (c *checker) resolveOverloadOrError( + e ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) { + // Attempt to resolve the overload. + resolution := c.resolveOverload(e, fn, target, args) + // No such overload, error noted in the resolveOverload call, type recorded here. + if resolution == nil { + c.setType(e, types.ErrorType) + return + } + // Overload found. + c.setType(e, resolution.Type) + c.setReference(e, resolution.Reference) +} + +func (c *checker) resolveOverload( + call ast.Expr, fn *decls.FunctionDecl, target ast.Expr, args []ast.Expr) *overloadResolution { + + var argTypes []*types.Type + if target != nil { + argTypes = append(argTypes, c.getType(target)) + } + for _, arg := range args { + argTypes = append(argTypes, c.getType(arg)) + } + + var resultType *types.Type + var checkedRef *ast.ReferenceInfo + for _, overload := range fn.OverloadDecls() { + // Determine whether the overload is currently considered. + if c.env.isOverloadDisabled(overload.ID()) { + continue + } + + // Ensure the call style for the overload matches. 
+ if (target == nil && overload.IsMemberFunction()) || + (target != nil && !overload.IsMemberFunction()) { + // not a compatible call style. + continue + } + + // Alternative type-checking behavior when the logical operators are compacted into + // variadic AST representations. + if fn.Name() == operators.LogicalAnd || fn.Name() == operators.LogicalOr { + checkedRef = ast.NewFunctionReference(overload.ID()) + for i, argType := range argTypes { + if !c.isAssignable(argType, types.BoolType) { + c.errors.typeMismatch( + args[i].ID(), + c.locationByID(args[i].ID()), + types.BoolType, + argType) + resultType = types.ErrorType + } + } + if isError(resultType) { + return nil + } + return newResolution(checkedRef, types.BoolType) + } + + overloadType := newFunctionType(overload.ResultType(), overload.ArgTypes()...) + typeParams := overload.TypeParams() + if len(typeParams) != 0 { + // Instantiate overload's type with fresh type variables. + substitutions := newMapping() + for _, typePar := range typeParams { + substitutions.add(types.NewTypeParamType(typePar), c.newTypeVar()) + } + overloadType = substitute(substitutions, overloadType, false) + } + + candidateArgTypes := overloadType.Parameters()[1:] + if c.isAssignableList(argTypes, candidateArgTypes) { + if checkedRef == nil { + checkedRef = ast.NewFunctionReference(overload.ID()) + } else { + checkedRef.AddOverload(overload.ID()) + } + + // First matching overload, determines result type. + fnResultType := substitute(c.mappings, overloadType.Parameters()[0], false) + if resultType == nil { + resultType = fnResultType + } else if !isDyn(resultType) && !fnResultType.IsExactType(resultType) { + resultType = types.DynType + } + } + } + + if resultType == nil { + for i, argType := range argTypes { + argTypes[i] = substitute(c.mappings, argType, true) + } + c.errors.noMatchingOverload(call.ID(), c.location(call), fn.Name(), argTypes, target != nil) + return nil + } + + return newResolution(checkedRef, resultType) +} + +func (c *checker) checkCreateList(e ast.Expr) { + create := e.AsList() + var elemsType *types.Type + optionalIndices := create.OptionalIndices() + optionals := make(map[int32]bool, len(optionalIndices)) + for _, optInd := range optionalIndices { + optionals[optInd] = true + } + for i, e := range create.Elements() { + c.check(e) + elemType := c.getType(e) + if optionals[int32(i)] { + var isOptional bool + elemType, isOptional = maybeUnwrapOptional(elemType) + if !isOptional && !isDyn(elemType) { + c.errors.typeMismatch(e.ID(), c.location(e), types.NewOptionalType(elemType), elemType) + } + } + elemsType = c.joinTypes(e, elemsType, elemType) + } + if elemsType == nil { + // If the list is empty, assign free type var to elem type. 
+ elemsType = c.newTypeVar() + } + c.setType(e, types.NewListType(elemsType)) +} + +func (c *checker) checkCreateMap(e ast.Expr) { + mapVal := e.AsMap() + var mapKeyType *types.Type + var mapValueType *types.Type + for _, e := range mapVal.Entries() { + entry := e.AsMapEntry() + key := entry.Key() + c.check(key) + mapKeyType = c.joinTypes(key, mapKeyType, c.getType(key)) + + val := entry.Value() + c.check(val) + valType := c.getType(val) + if entry.IsOptional() { + var isOptional bool + valType, isOptional = maybeUnwrapOptional(valType) + if !isOptional && !isDyn(valType) { + c.errors.typeMismatch(val.ID(), c.location(val), types.NewOptionalType(valType), valType) + } + } + mapValueType = c.joinTypes(val, mapValueType, valType) + } + if mapKeyType == nil { + // If the map is empty, assign free type variables to typeKey and value type. + mapKeyType = c.newTypeVar() + mapValueType = c.newTypeVar() + } + c.setType(e, types.NewMapType(mapKeyType, mapValueType)) +} + +func (c *checker) checkCreateStruct(e ast.Expr) { + msgVal := e.AsStruct() + // Determine the type of the message. + resultType := types.ErrorType + ident := c.env.LookupIdent(msgVal.TypeName()) + if ident == nil { + c.errors.undeclaredReference( + e.ID(), c.location(e), c.env.container.Name(), msgVal.TypeName()) + c.setType(e, types.ErrorType) + return + } + // Ensure the type name is fully qualified in the AST. + typeName := ident.Name() + if msgVal.TypeName() != typeName { + e.SetKindCase(c.NewStruct(e.ID(), typeName, msgVal.Fields())) + msgVal = e.AsStruct() + } + c.setReference(e, ast.NewIdentReference(typeName, nil)) + identKind := ident.Type().Kind() + if identKind != types.ErrorKind { + if identKind != types.TypeKind { + c.errors.notAType(e.ID(), c.location(e), ident.Type().DeclaredTypeName()) + } else { + resultType = ident.Type().Parameters()[0] + // Backwards compatibility test between well-known types and message types + // In this context, the type is being instantiated by its protobuf name which + // is not ideal or recommended, but some users expect this to work. + if isWellKnownType(resultType) { + typeName = getWellKnownTypeName(resultType) + } else if resultType.Kind() == types.StructKind { + typeName = resultType.DeclaredTypeName() + } else { + c.errors.notAMessageType(e.ID(), c.location(e), resultType.DeclaredTypeName()) + resultType = types.ErrorType + } + } + } + c.setType(e, resultType) + + // Check the field initializers. + for _, f := range msgVal.Fields() { + field := f.AsStructField() + fieldName := field.Name() + value := field.Value() + c.check(value) + + fieldType := types.ErrorType + ft, found := c.lookupFieldType(f.ID(), typeName, fieldName) + if found { + fieldType = ft + } + + valType := c.getType(value) + if field.IsOptional() { + var isOptional bool + valType, isOptional = maybeUnwrapOptional(valType) + if !isOptional && !isDyn(valType) { + c.errors.typeMismatch(value.ID(), c.location(value), types.NewOptionalType(valType), valType) + } + } + if !c.isAssignable(fieldType, valType) { + c.errors.fieldTypeMismatch(f.ID(), c.locationByID(f.ID()), fieldName, fieldType, valType) + } + } +} + +func (c *checker) checkComprehension(e ast.Expr) { + comp := e.AsComprehension() + c.check(comp.IterRange()) + c.check(comp.AccuInit()) + accuType := c.getType(comp.AccuInit()) + rangeType := substitute(c.mappings, c.getType(comp.IterRange()), false) + var varType *types.Type + + switch rangeType.Kind() { + case types.ListKind: + varType = rangeType.Parameters()[0] + case types.MapKind: + // Ranges over the keys. 
+ varType = rangeType.Parameters()[0] + case types.DynKind, types.ErrorKind, types.TypeParamKind: + // Set the range type to DYN to prevent assignment to a potentially incorrect type + // at a later point in type-checking. The isAssignable call will update the type + // substitutions for the type param under the covers. + c.isAssignable(types.DynType, rangeType) + // Set the range iteration variable to type DYN as well. + varType = types.DynType + default: + c.errors.notAComprehensionRange(comp.IterRange().ID(), c.location(comp.IterRange()), rangeType) + varType = types.ErrorType + } + + // Create a scope for the comprehension since it has a local accumulation variable. + // This scope will contain the accumulation variable used to compute the result. + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.AccuVar(), accuType)) + // Create a block scope for the loop. + c.env = c.env.enterScope() + c.env.AddIdents(decls.NewVariable(comp.IterVar(), varType)) + // Check the variable references in the condition and step. + c.check(comp.LoopCondition()) + c.assertType(comp.LoopCondition(), types.BoolType) + c.check(comp.LoopStep()) + c.assertType(comp.LoopStep(), accuType) + // Exit the loop's block scope before checking the result. + c.env = c.env.exitScope() + c.check(comp.Result()) + // Exit the comprehension scope. + c.env = c.env.exitScope() + c.setType(e, substitute(c.mappings, c.getType(comp.Result()), false)) +} + +// Checks compatibility of joined types, and returns the most general common type. +func (c *checker) joinTypes(e ast.Expr, previous, current *types.Type) *types.Type { + if previous == nil { + return current + } + if c.isAssignable(previous, current) { + return mostGeneral(previous, current) + } + if c.dynAggregateLiteralElementTypesEnabled() { + return types.DynType + } + c.errors.typeMismatch(e.ID(), c.location(e), previous, current) + return types.ErrorType +} + +func (c *checker) dynAggregateLiteralElementTypesEnabled() bool { + return c.env.aggLitElemType == dynElementType +} + +func (c *checker) newTypeVar() *types.Type { + id := c.freeTypeVarCounter + c.freeTypeVarCounter++ + return types.NewTypeParamType(fmt.Sprintf("_var%d", id)) +} + +func (c *checker) isAssignable(t1, t2 *types.Type) bool { + subs := isAssignable(c.mappings, t1, t2) + if subs != nil { + c.mappings = subs + return true + } + + return false +} + +func (c *checker) isAssignableList(l1, l2 []*types.Type) bool { + subs := isAssignableList(c.mappings, l1, l2) + if subs != nil { + c.mappings = subs + return true + } + + return false +} + +func maybeUnwrapString(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.LiteralKind: + literal := e.AsLiteral() + switch v := literal.(type) { + case types.String: + return string(v), true + } + } + return "", false +} + +func (c *checker) setType(e ast.Expr, t *types.Type) { + if old, found := c.TypeMap()[e.ID()]; found && !old.IsExactType(t) { + c.errors.incompatibleType(e.ID(), c.location(e), e, old, t) + return + } + c.SetType(e.ID(), t) +} + +func (c *checker) getType(e ast.Expr) *types.Type { + return c.TypeMap()[e.ID()] +} + +func (c *checker) setReference(e ast.Expr, r *ast.ReferenceInfo) { + if old, found := c.ReferenceMap()[e.ID()]; found && !old.Equals(r) { + c.errors.referenceRedefinition(e.ID(), c.location(e), e, old, r) + return + } + c.SetReference(e.ID(), r) +} + +func (c *checker) assertType(e ast.Expr, t *types.Type) { + if !c.isAssignable(t, c.getType(e)) { + c.errors.typeMismatch(e.ID(), c.location(e), t, c.getType(e)) + } +} 
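The net effect of the checker logic above is visible through the public Compile API of the vendored `cel` package. A minimal sketch, assuming a single declared variable; the expressions are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
	if err != nil {
		log.Fatal(err)
	}
	// Compile parses and type-checks; the checker assigns 'int' to 'x + 1'
	// after resolving the _+_ overload for integer arguments.
	ast, iss := env.Compile(`x + 1`)
	if iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	fmt.Println(ast.OutputType()) // int

	// Mixing argument types fails overload resolution at check time.
	_, iss = env.Compile(`x + "one"`)
	fmt.Println(iss.Err() != nil) // true: no matching overload for _+_
}
```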
+ +type overloadResolution struct { + Type *types.Type + Reference *ast.ReferenceInfo +} + +func newResolution(r *ast.ReferenceInfo, t *types.Type) *overloadResolution { + return &overloadResolution{ + Reference: r, + Type: t, + } +} + +func (c *checker) location(e ast.Expr) common.Location { + return c.locationByID(e.ID()) +} + +func (c *checker) locationByID(id int64) common.Location { + return c.SourceInfo().GetStartLocation(id) +} + +func (c *checker) lookupFieldType(exprID int64, structType, fieldName string) (*types.Type, bool) { + if _, found := c.env.provider.FindStructType(structType); !found { + // This should not happen, anyway, report an error. + c.errors.unexpectedFailedResolution(exprID, c.locationByID(exprID), structType) + return nil, false + } + + if ft, found := c.env.provider.FindStructFieldType(structType, fieldName); found { + return ft.Type, found + } + + c.errors.undefinedField(exprID, c.locationByID(exprID), fieldName) + return nil, false +} + +func isWellKnownType(t *types.Type) bool { + switch t.Kind() { + case types.AnyKind, types.TimestampKind, types.DurationKind, types.DynKind, types.NullTypeKind: + return true + case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind: + return t.IsAssignableType(types.NullType) + case types.ListKind: + return t.Parameters()[0] == types.DynType + case types.MapKind: + return t.Parameters()[0] == types.StringType && t.Parameters()[1] == types.DynType + } + return false +} + +func getWellKnownTypeName(t *types.Type) string { + if name, found := wellKnownTypes[t.Kind()]; found { + return name + } + return "" +} + +var ( + wellKnownTypes = map[types.Kind]string{ + types.AnyKind: "google.protobuf.Any", + types.BoolKind: "google.protobuf.BoolValue", + types.BytesKind: "google.protobuf.BytesValue", + types.DoubleKind: "google.protobuf.DoubleValue", + types.DurationKind: "google.protobuf.Duration", + types.DynKind: "google.protobuf.Value", + types.IntKind: "google.protobuf.Int64Value", + types.ListKind: "google.protobuf.ListValue", + types.NullTypeKind: "google.protobuf.NullValue", + types.MapKind: "google.protobuf.Struct", + types.StringKind: "google.protobuf.StringValue", + types.TimestampKind: "google.protobuf.Timestamp", + types.UintKind: "google.protobuf.UInt64Value", + } +) diff --git a/vendor/github.com/google/cel-go/checker/cost.go b/vendor/github.com/google/cel-go/checker/cost.go new file mode 100644 index 00000000..04244694 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/cost.go @@ -0,0 +1,705 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package checker + +import ( + "math" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/parser" +) + +// WARNING: Any changes to cost calculations in this file require a corresponding change in interpreter/runtimecost.go + +// CostEstimator estimates the sizes of variable length input data and the costs of functions. +type CostEstimator interface { + // EstimateSize returns a SizeEstimate for the given AstNode, or nil if + // the estimator has no estimate to provide. The size is equivalent to the result of the CEL `size()` function: + // length of strings and bytes, number of map entries or number of list items. + // EstimateSize is only called for AstNodes where + // CEL does not know the size; EstimateSize is not called for values defined inline in CEL where the size + // is already obvious to CEL. + EstimateSize(element AstNode) *SizeEstimate + // EstimateCallCost returns the estimated cost of an invocation, or nil if + // the estimator has no estimate to provide. + EstimateCallCost(function, overloadID string, target *AstNode, args []AstNode) *CallEstimate +} + +// CallEstimate includes a CostEstimate for the call, and an optional estimate of the result object size. +// The ResultSize should only be provided if the call results in a map, list, string or bytes. +type CallEstimate struct { + CostEstimate + ResultSize *SizeEstimate +} + +// AstNode represents an AST node for the purpose of cost estimations. +type AstNode interface { + // Path returns a field path through the provided type declarations to the type of the AstNode, or nil if the AstNode does not + // represent type directly reachable from the provided type declarations. + // The first path element is a variable. All subsequent path elements are one of: field name, '@items', '@keys', '@values'. + Path() []string + // Type returns the deduced type of the AstNode. + Type() *types.Type + // Expr returns the expression of the AstNode. + Expr() ast.Expr + // ComputedSize returns a size estimate of the AstNode derived from information available in the CEL expression. + // For constants and inline list and map declarations, the exact size is returned. For concatenated list, strings + // and bytes, the size is derived from the size estimates of the operands. nil is returned if there is no + // computed size available. 
+	ComputedSize() *SizeEstimate
+}
+
+type astNode struct {
+	path        []string
+	t           *types.Type
+	expr        ast.Expr
+	derivedSize *SizeEstimate
+}
+
+func (e astNode) Path() []string {
+	return e.path
+}
+
+func (e astNode) Type() *types.Type {
+	return e.t
+}
+
+func (e astNode) Expr() ast.Expr {
+	return e.expr
+}
+
+func (e astNode) ComputedSize() *SizeEstimate {
+	if e.derivedSize != nil {
+		return e.derivedSize
+	}
+	var v uint64
+	switch e.expr.Kind() {
+	case ast.LiteralKind:
+		switch ck := e.expr.AsLiteral().(type) {
+		case types.String:
+			// converting to runes here is an O(n) operation, but
+			// this is consistent with how size is computed at runtime,
+			// and how the language definition defines string size
+			v = uint64(len([]rune(ck)))
+		case types.Bytes:
+			v = uint64(len(ck))
+		case types.Bool, types.Double, types.Duration,
+			types.Int, types.Timestamp, types.Uint,
+			types.Null:
+			v = uint64(1)
+		default:
+			return nil
+		}
+	case ast.ListKind:
+		v = uint64(e.expr.AsList().Size())
+	case ast.MapKind:
+		v = uint64(e.expr.AsMap().Size())
+	default:
+		return nil
+	}
+
+	return &SizeEstimate{Min: v, Max: v}
+}
+
+// SizeEstimate represents an estimated size of a variable-length string, bytes, map, or list.
+type SizeEstimate struct {
+	Min, Max uint64
+}
+
+// Add adds to another SizeEstimate and returns the sum.
+// If the addition would overflow uint64, the result is math.MaxUint64.
+func (se SizeEstimate) Add(sizeEstimate SizeEstimate) SizeEstimate {
+	return SizeEstimate{
+		addUint64NoOverflow(se.Min, sizeEstimate.Min),
+		addUint64NoOverflow(se.Max, sizeEstimate.Max),
+	}
+}
+
+// Multiply multiplies by another SizeEstimate and returns the product.
+// If the multiplication would overflow uint64, the result is math.MaxUint64.
+func (se SizeEstimate) Multiply(sizeEstimate SizeEstimate) SizeEstimate {
+	return SizeEstimate{
+		multiplyUint64NoOverflow(se.Min, sizeEstimate.Min),
+		multiplyUint64NoOverflow(se.Max, sizeEstimate.Max),
+	}
+}
+
+// MultiplyByCostFactor multiplies a SizeEstimate by a cost factor and returns the CostEstimate
+// with each bound rounded up to the nearest integer.
+func (se SizeEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+	return CostEstimate{
+		multiplyByCostFactor(se.Min, costPerUnit),
+		multiplyByCostFactor(se.Max, costPerUnit),
+	}
+}
+
+// MultiplyByCost multiplies by the cost and returns the product.
+// If the multiplication would overflow uint64, the result is math.MaxUint64.
+func (se SizeEstimate) MultiplyByCost(cost CostEstimate) CostEstimate {
+	return CostEstimate{
+		multiplyUint64NoOverflow(se.Min, cost.Min),
+		multiplyUint64NoOverflow(se.Max, cost.Max),
+	}
+}
+
+// Union returns a SizeEstimate that encompasses both input SizeEstimates.
+func (se SizeEstimate) Union(size SizeEstimate) SizeEstimate {
+	result := se
+	if size.Min < result.Min {
+		result.Min = size.Min
+	}
+	if size.Max > result.Max {
+		result.Max = size.Max
+	}
+	return result
+}
+
+// CostEstimate represents an estimated cost range and provides add and multiply operations
+// that do not overflow.
+type CostEstimate struct {
+	Min, Max uint64
+}
+
+// Add adds the costs and returns the sum.
+// If the addition would overflow uint64 for the min or max, the value is set to math.MaxUint64.
+func (ce CostEstimate) Add(cost CostEstimate) CostEstimate {
+	return CostEstimate{
+		addUint64NoOverflow(ce.Min, cost.Min),
+		addUint64NoOverflow(ce.Max, cost.Max),
+	}
+}
+
+// Multiply multiplies by the cost and returns the product.
+// If the multiplication would overflow uint64, the result is math.MaxUint64.
+func (ce CostEstimate) Multiply(cost CostEstimate) CostEstimate {
+	return CostEstimate{
+		multiplyUint64NoOverflow(ce.Min, cost.Min),
+		multiplyUint64NoOverflow(ce.Max, cost.Max),
+	}
+}
+
+// MultiplyByCostFactor multiplies a CostEstimate by a cost factor and returns the CostEstimate
+// with each bound rounded up to the nearest integer.
+func (ce CostEstimate) MultiplyByCostFactor(costPerUnit float64) CostEstimate {
+	return CostEstimate{
+		multiplyByCostFactor(ce.Min, costPerUnit),
+		multiplyByCostFactor(ce.Max, costPerUnit),
+	}
+}
+
+// Union returns a CostEstimate that encompasses both input CostEstimates.
+func (ce CostEstimate) Union(size CostEstimate) CostEstimate {
+	result := ce
+	if size.Min < result.Min {
+		result.Min = size.Min
+	}
+	if size.Max > result.Max {
+		result.Max = size.Max
+	}
+	return result
+}
+
+// addUint64NoOverflow adds non-negative ints. If the result exceeds math.MaxUint64,
+// math.MaxUint64 is returned.
+func addUint64NoOverflow(x, y uint64) uint64 {
+	if y > 0 && x > math.MaxUint64-y {
+		return math.MaxUint64
+	}
+	return x + y
+}
+
+// multiplyUint64NoOverflow multiplies non-negative ints. If the result exceeds math.MaxUint64,
+// math.MaxUint64 is returned.
+func multiplyUint64NoOverflow(x, y uint64) uint64 {
+	if y != 0 && x > math.MaxUint64/y {
+		return math.MaxUint64
+	}
+	return x * y
+}
+
+// multiplyByCostFactor multiplies an integer by a cost factor float and returns the nearest
+// integer value, rounded up.
+func multiplyByCostFactor(x uint64, y float64) uint64 {
+	xFloat := float64(x)
+	if xFloat > 0 && y > 0 && xFloat > math.MaxUint64/y {
+		return math.MaxUint64
+	}
+	ceil := math.Ceil(xFloat * y)
+	if ceil >= doubleTwoTo64 {
+		return math.MaxUint64
+	}
+	return uint64(ceil)
+}
+
+var (
+	selectAndIdentCost = CostEstimate{Min: common.SelectAndIdentCost, Max: common.SelectAndIdentCost}
+	constCost          = CostEstimate{Min: common.ConstCost, Max: common.ConstCost}
+
+	createListBaseCost    = CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
+	createMapBaseCost     = CostEstimate{Min: common.MapCreateBaseCost, Max: common.MapCreateBaseCost}
+	createMessageBaseCost = CostEstimate{Min: common.StructCreateBaseCost, Max: common.StructCreateBaseCost}
+)
+
+type coster struct {
+	// exprPath maps from Expr Id to field path.
+	exprPath map[int64][]string
+	// iterRanges tracks the iterRange of each iterVar.
+	iterRanges iterRangeScopes
+	// computedSizes tracks the computed sizes of call results.
+	computedSizes      map[int64]SizeEstimate
+	checkedAST         *ast.AST
+	estimator          CostEstimator
+	overloadEstimators map[string]FunctionEstimator
+	// presenceTestCost will be either zero or one depending on whether has() macros count
+	// against cost computations.
+	presenceTestCost CostEstimate
+}
+
+// Use a stack of iterVar -> iterRange Expr Ids to handle shadowed variable names.
+type iterRangeScopes map[string][]int64
+
+func (vs iterRangeScopes) push(varName string, expr ast.Expr) {
+	vs[varName] = append(vs[varName], expr.ID())
+}
+
+func (vs iterRangeScopes) pop(varName string) {
+	varStack := vs[varName]
+	vs[varName] = varStack[:len(varStack)-1]
+}
+
+func (vs iterRangeScopes) peek(varName string) (int64, bool) {
+	varStack := vs[varName]
+	if len(varStack) > 0 {
+		return varStack[len(varStack)-1], true
+	}
+	return 0, false
+}
+
+// CostOption configures flags which affect cost computations.
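+//
+// Editor's illustration (not part of the vendored upstream source): options
+// are applied in order by Cost, so a caller might disable the has() macro
+// cost as follows, where checkedAST and myEstimator are assumed to exist:
+//
+//	est, err := checker.Cost(checkedAST, myEstimator,
+//		checker.PresenceTestHasCost(false))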
+type CostOption func(*coster) error
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+//
+// Defaults to presence testing having a cost of one.
+func PresenceTestHasCost(hasCost bool) CostOption {
+	return func(c *coster) error {
+		if hasCost {
+			c.presenceTestCost = selectAndIdentCost
+			return nil
+		}
+		c.presenceTestCost = CostEstimate{Min: 0, Max: 0}
+		return nil
+	}
+}
+
+// FunctionEstimator provides a CallEstimate given the target and arguments for a specific function, overload pair.
+type FunctionEstimator func(estimator CostEstimator, target *AstNode, args []AstNode) *CallEstimate
+
+// OverloadCostEstimate binds a FunctionEstimator to a specific function overload ID.
+//
+// When an OverloadCostEstimate is provided, it will override the cost calculation of the CostEstimator provided to
+// the Cost() call.
+func OverloadCostEstimate(overloadID string, functionCoster FunctionEstimator) CostOption {
+	return func(c *coster) error {
+		c.overloadEstimators[overloadID] = functionCoster
+		return nil
+	}
+}
+
+// Cost estimates the cost of the parsed and type-checked CEL expression.
+func Cost(checked *ast.AST, estimator CostEstimator, opts ...CostOption) (CostEstimate, error) {
+	c := &coster{
+		checkedAST:         checked,
+		estimator:          estimator,
+		overloadEstimators: map[string]FunctionEstimator{},
+		exprPath:           map[int64][]string{},
+		iterRanges:         map[string][]int64{},
+		computedSizes:      map[int64]SizeEstimate{},
+		presenceTestCost:   CostEstimate{Min: 1, Max: 1},
+	}
+	for _, opt := range opts {
+		err := opt(c)
+		if err != nil {
+			return CostEstimate{}, err
+		}
+	}
+	return c.cost(checked.Expr()), nil
+}
+
+func (c *coster) cost(e ast.Expr) CostEstimate {
+	if e == nil {
+		return CostEstimate{}
+	}
+	var cost CostEstimate
+	switch e.Kind() {
+	case ast.LiteralKind:
+		cost = constCost
+	case ast.IdentKind:
+		cost = c.costIdent(e)
+	case ast.SelectKind:
+		cost = c.costSelect(e)
+	case ast.CallKind:
+		cost = c.costCall(e)
+	case ast.ListKind:
+		cost = c.costCreateList(e)
+	case ast.MapKind:
+		cost = c.costCreateMap(e)
+	case ast.StructKind:
+		cost = c.costCreateStruct(e)
+	case ast.ComprehensionKind:
+		cost = c.costComprehension(e)
+	default:
+		return CostEstimate{}
+	}
+	return cost
+}
+
+func (c *coster) costIdent(e ast.Expr) CostEstimate {
+	identName := e.AsIdent()
+	// build and track the field path
+	if iterRange, ok := c.iterRanges.peek(identName); ok {
+		switch c.checkedAST.GetType(iterRange).Kind() {
+		case types.ListKind:
+			c.addPath(e, append(c.exprPath[iterRange], "@items"))
+		case types.MapKind:
+			c.addPath(e, append(c.exprPath[iterRange], "@keys"))
+		}
+	} else {
+		c.addPath(e, []string{identName})
+	}
+
+	return selectAndIdentCost
+}
+
+func (c *coster) costSelect(e ast.Expr) CostEstimate {
+	sel := e.AsSelect()
+	var sum CostEstimate
+	if sel.IsTestOnly() {
+		// recurse, but do not add any cost
+		// this is equivalent to how evalTestOnly increments the runtime cost counter
+		// but does not add any additional cost for the qualifier, except here we do
+		// the reverse (ident adds cost)
+		sum = sum.Add(c.presenceTestCost)
+		sum = sum.Add(c.cost(sel.Operand()))
+		return sum
+	}
+	sum = sum.Add(c.cost(sel.Operand()))
+	targetType := c.getType(sel.Operand())
+	switch targetType.Kind() {
+	case types.MapKind, types.StructKind, types.TypeParamKind:
+		sum = sum.Add(selectAndIdentCost)
+	}
+
+	// build and track the field path
+	c.addPath(e, append(c.getPath(sel.Operand()), sel.FieldName()))
+
+	return sum
+}
+
+func (c *coster) costCall(e ast.Expr) CostEstimate
{ + call := e.AsCall() + args := call.Args() + + var sum CostEstimate + + argTypes := make([]AstNode, len(args)) + argCosts := make([]CostEstimate, len(args)) + for i, arg := range args { + argCosts[i] = c.cost(arg) + argTypes[i] = c.newAstNode(arg) + } + + overloadIDs := c.checkedAST.GetOverloadIDs(e.ID()) + if len(overloadIDs) == 0 { + return CostEstimate{} + } + var targetType AstNode + if call.IsMemberFunction() { + sum = sum.Add(c.cost(call.Target())) + targetType = c.newAstNode(call.Target()) + } + // Pick a cost estimate range that covers all the overload cost estimation ranges + fnCost := CostEstimate{Min: uint64(math.MaxUint64), Max: 0} + var resultSize *SizeEstimate + for _, overload := range overloadIDs { + overloadCost := c.functionCost(call.FunctionName(), overload, &targetType, argTypes, argCosts) + fnCost = fnCost.Union(overloadCost.CostEstimate) + if overloadCost.ResultSize != nil { + if resultSize == nil { + resultSize = overloadCost.ResultSize + } else { + size := resultSize.Union(*overloadCost.ResultSize) + resultSize = &size + } + } + // build and track the field path for index operations + switch overload { + case overloads.IndexList: + if len(args) > 0 { + c.addPath(e, append(c.getPath(args[0]), "@items")) + } + case overloads.IndexMap: + if len(args) > 0 { + c.addPath(e, append(c.getPath(args[0]), "@values")) + } + } + } + if resultSize != nil { + c.computedSizes[e.ID()] = *resultSize + } + return sum.Add(fnCost) +} + +func (c *coster) costCreateList(e ast.Expr) CostEstimate { + create := e.AsList() + var sum CostEstimate + for _, e := range create.Elements() { + sum = sum.Add(c.cost(e)) + } + return sum.Add(createListBaseCost) +} + +func (c *coster) costCreateMap(e ast.Expr) CostEstimate { + mapVal := e.AsMap() + var sum CostEstimate + for _, ent := range mapVal.Entries() { + entry := ent.AsMapEntry() + sum = sum.Add(c.cost(entry.Key())) + sum = sum.Add(c.cost(entry.Value())) + } + return sum.Add(createMapBaseCost) +} + +func (c *coster) costCreateStruct(e ast.Expr) CostEstimate { + msgVal := e.AsStruct() + var sum CostEstimate + for _, ent := range msgVal.Fields() { + field := ent.AsStructField() + sum = sum.Add(c.cost(field.Value())) + } + return sum.Add(createMessageBaseCost) +} + +func (c *coster) costComprehension(e ast.Expr) CostEstimate { + comp := e.AsComprehension() + var sum CostEstimate + sum = sum.Add(c.cost(comp.IterRange())) + sum = sum.Add(c.cost(comp.AccuInit())) + + // Track the iterRange of each IterVar for field path construction + c.iterRanges.push(comp.IterVar(), comp.IterRange()) + loopCost := c.cost(comp.LoopCondition()) + stepCost := c.cost(comp.LoopStep()) + c.iterRanges.pop(comp.IterVar()) + sum = sum.Add(c.cost(comp.Result())) + rangeCnt := c.sizeEstimate(c.newAstNode(comp.IterRange())) + + c.computedSizes[e.ID()] = rangeCnt + + rangeCost := rangeCnt.MultiplyByCost(stepCost.Add(loopCost)) + sum = sum.Add(rangeCost) + + return sum +} + +func (c *coster) sizeEstimate(t AstNode) SizeEstimate { + if l := t.ComputedSize(); l != nil { + return *l + } + if l := c.estimator.EstimateSize(t); l != nil { + return *l + } + // return an estimate of 1 for return types of set + // lengths, since strings/bytes/more complex objects could be of + // variable length + if isScalar(t.Type()) { + // TODO: since the logic for size estimation is split between + // ComputedSize and isScalar, changing one will likely require changing + // the other, so they should be merged in the future if possible + return SizeEstimate{Min: 1, Max: 1} + } + return 
SizeEstimate{Min: 0, Max: math.MaxUint64}
+}
+
+func (c *coster) functionCost(function, overloadID string, target *AstNode, args []AstNode, argCosts []CostEstimate) CallEstimate {
+	argCostSum := func() CostEstimate {
+		var sum CostEstimate
+		for _, a := range argCosts {
+			sum = sum.Add(a)
+		}
+		return sum
+	}
+	if len(c.overloadEstimators) != 0 {
+		if estimator, found := c.overloadEstimators[overloadID]; found {
+			if est := estimator(c.estimator, target, args); est != nil {
+				callEst := *est
+				return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+			}
+		}
+	}
+	if est := c.estimator.EstimateCallCost(function, overloadID, target, args); est != nil {
+		callEst := *est
+		return CallEstimate{CostEstimate: callEst.Add(argCostSum()), ResultSize: est.ResultSize}
+	}
+	switch overloadID {
+	// O(n) functions
+	case overloads.ExtFormatString:
+		if target != nil {
+			// ResultSize not calculated because we can't bound the max size.
+			return CallEstimate{CostEstimate: c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+		}
+	case overloads.StringToBytes:
+		if len(args) == 1 {
+			sz := c.sizeEstimate(args[0])
+			// ResultSize max is when each char converts to 4 bytes.
+			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min, Max: sz.Max * 4}}
+		}
+	case overloads.BytesToString:
+		if len(args) == 1 {
+			sz := c.sizeEstimate(args[0])
+			// ResultSize min is when 4 bytes convert to 1 char.
+			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min / 4, Max: sz.Max}}
+		}
+	case overloads.ExtQuoteString:
+		if len(args) == 1 {
+			sz := c.sizeEstimate(args[0])
+			// ResultSize max is when each char is escaped. 2 quote chars always added.
+			return CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &SizeEstimate{Min: sz.Min + 2, Max: sz.Max*2 + 2}}
+		}
+	case overloads.StartsWithString, overloads.EndsWithString:
+		if len(args) == 1 {
+			return CallEstimate{CostEstimate: c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())}
+		}
+	case overloads.InList:
+		// If a list is composed entirely of constant values this is O(1), but we don't account for that here.
+		// We just assume all list containment checks are O(n).
+		if len(args) == 2 {
+			return CallEstimate{CostEstimate: c.sizeEstimate(args[1]).MultiplyByCostFactor(1).Add(argCostSum())}
+		}
+	// O(nm) functions
+	case overloads.MatchesString:
+		// https://swtch.com/~rsc/regexp/regexp1.html applies to the RE2 implementation supported by CEL
+		if target != nil && len(args) == 1 {
+			// Add one to the string length for cost purposes, so the product of the string and
+			// regex sizes is not 0 when the string is empty but the regex is still expensive.
+			strCost := c.sizeEstimate(*target).Add(SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+			// We don't know how many expressions are in the regex, just the string length (a huge
+			// improvement here would be to somehow get a count of the number of expressions in the
+			// regex or how many states are in the regex state machine and use that to measure
+			// regex cost). For now, we're making a guess that each expression in a regex is
+			// typically at least 4 chars in length.
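+			// (Editor's note, not part of the vendored upstream source: with a
+			// RegexStringLengthCostFactor of 0.25 this heuristic treats a regex
+			// of length n as roughly n/4 expensive sub-expressions.)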
+ regexCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor) + return CallEstimate{CostEstimate: strCost.Multiply(regexCost).Add(argCostSum())} + } + case overloads.ContainsString: + if target != nil && len(args) == 1 { + strCost := c.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor) + substrCost := c.sizeEstimate(args[0]).MultiplyByCostFactor(common.StringTraversalCostFactor) + return CallEstimate{CostEstimate: strCost.Multiply(substrCost).Add(argCostSum())} + } + case overloads.LogicalOr, overloads.LogicalAnd: + lhs := argCosts[0] + rhs := argCosts[1] + // min cost is min of LHS for short circuited && or || + argCost := CostEstimate{Min: lhs.Min, Max: lhs.Add(rhs).Max} + return CallEstimate{CostEstimate: argCost} + case overloads.Conditional: + size := c.sizeEstimate(args[1]).Union(c.sizeEstimate(args[2])) + conditionalCost := argCosts[0] + ifTrueCost := argCosts[1] + ifFalseCost := argCosts[2] + argCost := conditionalCost.Add(ifTrueCost.Union(ifFalseCost)) + return CallEstimate{CostEstimate: argCost, ResultSize: &size} + case overloads.AddString, overloads.AddBytes, overloads.AddList: + if len(args) == 2 { + lhsSize := c.sizeEstimate(args[0]) + rhsSize := c.sizeEstimate(args[1]) + resultSize := lhsSize.Add(rhsSize) + switch overloadID { + case overloads.AddList: + // list concatenation is O(1), but we handle it here to track size + return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum()), ResultSize: &resultSize} + default: + return CallEstimate{CostEstimate: resultSize.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum()), ResultSize: &resultSize} + } + } + case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, + overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, + overloads.Equals, overloads.NotEquals: + lhsCost := c.sizeEstimate(args[0]) + rhsCost := c.sizeEstimate(args[1]) + min := uint64(0) + smallestMax := lhsCost.Max + if rhsCost.Max < smallestMax { + smallestMax = rhsCost.Max + } + if smallestMax > 0 { + min = 1 + } + // equality of 2 scalar values results in a cost of 1 + return CallEstimate{CostEstimate: CostEstimate{Min: min, Max: smallestMax}.MultiplyByCostFactor(common.StringTraversalCostFactor).Add(argCostSum())} + } + // O(1) functions + // See CostTracker.costCall for more details about O(1) cost calculations + + // Benchmarks suggest that most of the other operations take +/- 50% of a base cost unit + // which on an Intel xeon 2.20GHz CPU is 50ns. + return CallEstimate{CostEstimate: CostEstimate{Min: 1, Max: 1}.Add(argCostSum())} +} + +func (c *coster) getType(e ast.Expr) *types.Type { + return c.checkedAST.GetType(e.ID()) +} + +func (c *coster) getPath(e ast.Expr) []string { + return c.exprPath[e.ID()] +} + +func (c *coster) addPath(e ast.Expr, path []string) { + c.exprPath[e.ID()] = path +} + +func (c *coster) newAstNode(e ast.Expr) *astNode { + path := c.getPath(e) + if len(path) > 0 && path[0] == parser.AccumulatorName { + // only provide paths to root vars; omit accumulator vars + path = nil + } + var derivedSize *SizeEstimate + if size, ok := c.computedSizes[e.ID()]; ok { + derivedSize = &size + } + return &astNode{ + path: path, + t: c.getType(e), + expr: e, + derivedSize: derivedSize} +} + +// isScalar returns true if the given type is known to be of a constant size at +// compile time. 
isScalar will return false for strings (they are variable-width) +// in addition to protobuf.Any and protobuf.Value (their size is not knowable at compile time). +func isScalar(t *types.Type) bool { + switch t.Kind() { + case types.BoolKind, types.DoubleKind, types.DurationKind, types.IntKind, types.TimestampKind, types.UintKind: + return true + } + return false +} + +var ( + doubleTwoTo64 = math.Ldexp(1.0, 64) +) diff --git a/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel new file mode 100644 index 00000000..a6b0be29 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/decls/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "decls.go", + ], + importpath = "github.com/google/cel-go/checker/decls", + deps = [ + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//types/known/emptypb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/checker/decls/decls.go b/vendor/github.com/google/cel-go/checker/decls/decls.go new file mode 100644 index 00000000..c0e5de46 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/decls/decls.go @@ -0,0 +1,237 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package decls provides helpers for creating variable and function declarations. +package decls + +import ( + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +var ( + // Error type used to communicate issues during type-checking. + Error = &exprpb.Type{ + TypeKind: &exprpb.Type_Error{ + Error: &emptypb.Empty{}}} + + // Dyn is a top-type used to represent any value. + Dyn = &exprpb.Type{ + TypeKind: &exprpb.Type_Dyn{ + Dyn: &emptypb.Empty{}}} +) + +// Commonly used types. +var ( + Bool = NewPrimitiveType(exprpb.Type_BOOL) + Bytes = NewPrimitiveType(exprpb.Type_BYTES) + Double = NewPrimitiveType(exprpb.Type_DOUBLE) + Int = NewPrimitiveType(exprpb.Type_INT64) + Null = &exprpb.Type{ + TypeKind: &exprpb.Type_Null{ + Null: structpb.NullValue_NULL_VALUE}} + String = NewPrimitiveType(exprpb.Type_STRING) + Uint = NewPrimitiveType(exprpb.Type_UINT64) +) + +// Well-known types. +// TODO: Replace with an abstract type registry. +var ( + Any = NewWellKnownType(exprpb.Type_ANY) + Duration = NewWellKnownType(exprpb.Type_DURATION) + Timestamp = NewWellKnownType(exprpb.Type_TIMESTAMP) +) + +// NewAbstractType creates an abstract type declaration which references a proto +// message name and may also include type parameters. 
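+//
+// For example (editor's illustration, not vendored source text), the optional
+// type below is declared as an abstract type parameterized by T:
+//
+//	opt := decls.NewAbstractType("optional_type", decls.NewTypeParamType("T"))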
+func NewAbstractType(name string, paramTypes ...*exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_AbstractType_{
+			AbstractType: &exprpb.Type_AbstractType{
+				Name:           name,
+				ParameterTypes: paramTypes}}}
+}
+
+// NewOptionalType constructs an abstract type indicating that the parameterized type
+// may be contained within the object.
+func NewOptionalType(paramType *exprpb.Type) *exprpb.Type {
+	return NewAbstractType("optional_type", paramType)
+}
+
+// NewFunctionType creates a function invocation contract, typically only used
+// by type-checking steps after overload resolution.
+func NewFunctionType(resultType *exprpb.Type,
+	argTypes ...*exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_Function{
+			Function: &exprpb.Type_FunctionType{
+				ResultType: resultType,
+				ArgTypes:   argTypes}}}
+}
+
+// NewFunction creates a named function declaration with one or more overloads.
+func NewFunction(name string,
+	overloads ...*exprpb.Decl_FunctionDecl_Overload) *exprpb.Decl {
+	return &exprpb.Decl{
+		Name: name,
+		DeclKind: &exprpb.Decl_Function{
+			Function: &exprpb.Decl_FunctionDecl{
+				Overloads: overloads}}}
+}
+
+// NewIdent creates a named identifier declaration with an optional literal
+// value.
+//
+// Literal values are typically only associated with enum identifiers.
+//
+// Deprecated: Use NewVar or NewConst instead.
+func NewIdent(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+	return &exprpb.Decl{
+		Name: name,
+		DeclKind: &exprpb.Decl_Ident{
+			Ident: &exprpb.Decl_IdentDecl{
+				Type:  t,
+				Value: v}}}
+}
+
+// NewConst creates a constant identifier with a CEL constant literal value.
+func NewConst(name string, t *exprpb.Type, v *exprpb.Constant) *exprpb.Decl {
+	return NewIdent(name, t, v)
+}
+
+// NewVar creates a variable identifier.
+func NewVar(name string, t *exprpb.Type) *exprpb.Decl {
+	return NewIdent(name, t, nil)
+}
+
+// NewInstanceOverload creates an instance function overload contract.
+// The first element of argTypes is the instance (receiver) type.
+func NewInstanceOverload(id string, argTypes []*exprpb.Type,
+	resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload {
+	return &exprpb.Decl_FunctionDecl_Overload{
+		OverloadId:         id,
+		ResultType:         resultType,
+		Params:             argTypes,
+		IsInstanceFunction: true}
+}
+
+// NewListType generates a new list type with elements of a certain type.
+func NewListType(elem *exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_ListType_{
+			ListType: &exprpb.Type_ListType{
+				ElemType: elem}}}
+}
+
+// NewMapType generates a new map type with typed keys and values.
+func NewMapType(key *exprpb.Type, value *exprpb.Type) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_MapType_{
+			MapType: &exprpb.Type_MapType{
+				KeyType:   key,
+				ValueType: value}}}
+}
+
+// NewObjectType creates an object type for a qualified type name.
+func NewObjectType(typeName string) *exprpb.Type {
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_MessageType{
+			MessageType: typeName}}
+}
+
+// NewOverload creates a function overload declaration which contains a unique
+// overload id as well as the expected argument and result types. Overloads
+// must be aggregated within a Function declaration.
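+//
+// A sketch of typical use (editor's example, not upstream code), declaring a
+// global overload of size for strings:
+//
+//	sizeFn := decls.NewFunction("size",
+//		decls.NewOverload("size_string",
+//			[]*exprpb.Type{decls.String}, decls.Int))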
+func NewOverload(id string, argTypes []*exprpb.Type, + resultType *exprpb.Type) *exprpb.Decl_FunctionDecl_Overload { + return &exprpb.Decl_FunctionDecl_Overload{ + OverloadId: id, + ResultType: resultType, + Params: argTypes, + IsInstanceFunction: false} +} + +// NewParameterizedInstanceOverload creates a parametric function instance overload type. +func NewParameterizedInstanceOverload(id string, + argTypes []*exprpb.Type, + resultType *exprpb.Type, + typeParams []string) *exprpb.Decl_FunctionDecl_Overload { + return &exprpb.Decl_FunctionDecl_Overload{ + OverloadId: id, + ResultType: resultType, + Params: argTypes, + TypeParams: typeParams, + IsInstanceFunction: true} +} + +// NewParameterizedOverload creates a parametric function overload type. +func NewParameterizedOverload(id string, + argTypes []*exprpb.Type, + resultType *exprpb.Type, + typeParams []string) *exprpb.Decl_FunctionDecl_Overload { + return &exprpb.Decl_FunctionDecl_Overload{ + OverloadId: id, + ResultType: resultType, + Params: argTypes, + TypeParams: typeParams, + IsInstanceFunction: false} +} + +// NewPrimitiveType creates a type for a primitive value. See the var declarations +// for Int, Uint, etc. +func NewPrimitiveType(primitive exprpb.Type_PrimitiveType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_Primitive{ + Primitive: primitive}} +} + +// NewTypeType creates a new type designating a type. +func NewTypeType(nested *exprpb.Type) *exprpb.Type { + if nested == nil { + // must set the nested field for a valid oneof option + nested = &exprpb.Type{} + } + return &exprpb.Type{ + TypeKind: &exprpb.Type_Type{ + Type: nested}} +} + +// NewTypeParamType creates a type corresponding to a named, contextual parameter. +func NewTypeParamType(name string) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_TypeParam{ + TypeParam: name}} +} + +// NewWellKnownType creates a type corresponding to a protobuf well-known type +// value. +func NewWellKnownType(wellKnown exprpb.Type_WellKnownType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_WellKnown{ + WellKnown: wellKnown}} +} + +// NewWrapperType creates a wrapped primitive type instance. Wrapped types +// are roughly equivalent to a nullable, or optionally valued type. +func NewWrapperType(wrapped *exprpb.Type) *exprpb.Type { + primitive := wrapped.GetPrimitive() + if primitive == exprpb.Type_PRIMITIVE_TYPE_UNSPECIFIED { + // TODO: return an error + panic("Wrapped type must be a primitive") + } + return &exprpb.Type{ + TypeKind: &exprpb.Type_Wrapper{ + Wrapper: primitive}} +} diff --git a/vendor/github.com/google/cel-go/checker/env.go b/vendor/github.com/google/cel-go/checker/env.go new file mode 100644 index 00000000..d5ac0501 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/env.go @@ -0,0 +1,284 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package checker + +import ( + "fmt" + "strings" + + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/parser" +) + +type aggregateLiteralElementType int + +const ( + dynElementType aggregateLiteralElementType = iota + homogenousElementType aggregateLiteralElementType = 1 << iota +) + +var ( + crossTypeNumericComparisonOverloads = map[string]struct{}{ + // double <-> int | uint + overloads.LessDoubleInt64: {}, + overloads.LessDoubleUint64: {}, + overloads.LessEqualsDoubleInt64: {}, + overloads.LessEqualsDoubleUint64: {}, + overloads.GreaterDoubleInt64: {}, + overloads.GreaterDoubleUint64: {}, + overloads.GreaterEqualsDoubleInt64: {}, + overloads.GreaterEqualsDoubleUint64: {}, + // int <-> double | uint + overloads.LessInt64Double: {}, + overloads.LessInt64Uint64: {}, + overloads.LessEqualsInt64Double: {}, + overloads.LessEqualsInt64Uint64: {}, + overloads.GreaterInt64Double: {}, + overloads.GreaterInt64Uint64: {}, + overloads.GreaterEqualsInt64Double: {}, + overloads.GreaterEqualsInt64Uint64: {}, + // uint <-> double | int + overloads.LessUint64Double: {}, + overloads.LessUint64Int64: {}, + overloads.LessEqualsUint64Double: {}, + overloads.LessEqualsUint64Int64: {}, + overloads.GreaterUint64Double: {}, + overloads.GreaterUint64Int64: {}, + overloads.GreaterEqualsUint64Double: {}, + overloads.GreaterEqualsUint64Int64: {}, + } +) + +// Env is the environment for type checking. +// +// The Env is comprised of a container, type provider, declarations, and other related objects +// which can be used to assist with type-checking. +type Env struct { + container *containers.Container + provider types.Provider + declarations *Scopes + aggLitElemType aggregateLiteralElementType + filteredOverloadIDs map[string]struct{} +} + +// NewEnv returns a new *Env with the given parameters. +func NewEnv(container *containers.Container, provider types.Provider, opts ...Option) (*Env, error) { + declarations := newScopes() + declarations.Push() + + envOptions := &options{} + for _, opt := range opts { + if err := opt(envOptions); err != nil { + return nil, err + } + } + aggLitElemType := dynElementType + if envOptions.homogeneousAggregateLiterals { + aggLitElemType = homogenousElementType + } + filteredOverloadIDs := crossTypeNumericComparisonOverloads + if envOptions.crossTypeNumericComparisons { + filteredOverloadIDs = make(map[string]struct{}) + } + if envOptions.validatedDeclarations != nil { + declarations = envOptions.validatedDeclarations.Copy() + } + return &Env{ + container: container, + provider: provider, + declarations: declarations, + aggLitElemType: aggLitElemType, + filteredOverloadIDs: filteredOverloadIDs, + }, nil +} + +// AddIdents configures the checker with a list of variable declarations. +// +// If there are overlapping declarations, the method will error. +func (e *Env) AddIdents(declarations ...*decls.VariableDecl) error { + errMsgs := make([]errorMsg, 0) + for _, d := range declarations { + errMsgs = append(errMsgs, e.addIdent(d)) + } + return formatError(errMsgs) +} + +// AddFunctions configures the checker with a list of function declarations. +// +// If there are overlapping declarations, the method will error. +func (e *Env) AddFunctions(declarations ...*decls.FunctionDecl) error { + errMsgs := make([]errorMsg, 0) + for _, d := range declarations { + errMsgs = append(errMsgs, e.setFunction(d)...) 
+	}
+	return formatError(errMsgs)
+}
+
+// LookupIdent returns the declaration for the given name as an identifier in the Env.
+// Returns nil if no such identifier is found in the Env.
+func (e *Env) LookupIdent(name string) *decls.VariableDecl {
+	for _, candidate := range e.container.ResolveCandidateNames(name) {
+		if ident := e.declarations.FindIdent(candidate); ident != nil {
+			return ident
+		}
+
+		// Next try to import the name as a reference to a message type. If found,
+		// the declaration is added to the outermost (global) scope of the
+		// environment, so next time we can access it faster.
+		if t, found := e.provider.FindStructType(candidate); found {
+			decl := decls.NewVariable(candidate, t)
+			e.declarations.AddIdent(decl)
+			return decl
+		}
+
+		if i, found := e.provider.FindIdent(candidate); found {
+			if t, ok := i.(*types.Type); ok {
+				decl := decls.NewVariable(candidate, types.NewTypeTypeWithParam(t))
+				e.declarations.AddIdent(decl)
+				return decl
+			}
+		}
+
+		// Next try to import this as an enum value by splitting the name into a type prefix and
+		// the enum inside.
+		if enumValue := e.provider.EnumValue(candidate); enumValue.Type() != types.ErrType {
+			decl := decls.NewConstant(candidate, types.IntType, enumValue)
+			e.declarations.AddIdent(decl)
+			return decl
+		}
+	}
+	return nil
+}
+
+// LookupFunction returns the declaration for the given name as a function in the Env.
+// Returns nil if no such function is found in the Env.
+func (e *Env) LookupFunction(name string) *decls.FunctionDecl {
+	for _, candidate := range e.container.ResolveCandidateNames(name) {
+		if fn := e.declarations.FindFunction(candidate); fn != nil {
+			return fn
+		}
+	}
+	return nil
+}
+
+// setFunction adds the function Decl to the Env.
+// Adds a function decl if one doesn't already exist, then adds all overloads from the Decl.
+// If an overload overlaps with an existing overload, adds to the errors in the Env instead.
+func (e *Env) setFunction(fn *decls.FunctionDecl) []errorMsg {
+	errMsgs := make([]errorMsg, 0)
+	current := e.declarations.FindFunction(fn.Name())
+	if current != nil {
+		var err error
+		current, err = current.Merge(fn)
+		if err != nil {
+			return append(errMsgs, errorMsg(err.Error()))
+		}
+	} else {
+		current = fn
+	}
+	for _, overload := range current.OverloadDecls() {
+		for _, macro := range parser.AllMacros {
+			if macro.Function() == current.Name() &&
+				macro.IsReceiverStyle() == overload.IsMemberFunction() &&
+				macro.ArgCount() == len(overload.ArgTypes()) {
+				errMsgs = append(errMsgs, overlappingMacroError(current.Name(), macro.ArgCount()))
+			}
+		}
+		if len(errMsgs) > 0 {
+			return errMsgs
+		}
+	}
+	e.declarations.SetFunction(current)
+	return errMsgs
+}
+
+// addIdent adds the Decl to the declarations in the Env.
+// Returns a non-empty errorMsg if the identifier is already declared in the scope.
+func (e *Env) addIdent(decl *decls.VariableDecl) errorMsg {
+	current := e.declarations.FindIdentInScope(decl.Name())
+	if current != nil {
+		if current.DeclarationIsEquivalent(decl) {
+			return ""
+		}
+		return overlappingIdentifierError(decl.Name())
+	}
+	e.declarations.AddIdent(decl)
+	return ""
+}
+
+// isOverloadDisabled returns whether the overloadID is disabled in the current environment.
+func (e *Env) isOverloadDisabled(overloadID string) bool {
+	_, found := e.filteredOverloadIDs[overloadID]
+	return found
+}
+
+// validatedDeclarations returns a reference to the validated variable and function declaration
+// scope stack. The returned Scopes must be copied before use.
+func (e *Env) validatedDeclarations() *Scopes { + return e.declarations +} + +// enterScope creates a new Env instance with a new innermost declaration scope. +func (e *Env) enterScope() *Env { + childDecls := e.declarations.Push() + return &Env{ + declarations: childDecls, + container: e.container, + provider: e.provider, + aggLitElemType: e.aggLitElemType, + } +} + +// exitScope creates a new Env instance with the nearest outer declaration scope. +func (e *Env) exitScope() *Env { + parentDecls := e.declarations.Pop() + return &Env{ + declarations: parentDecls, + container: e.container, + provider: e.provider, + aggLitElemType: e.aggLitElemType, + } +} + +// errorMsg is a type alias meant to represent error-based return values which +// may be accumulated into an error at a later point in execution. +type errorMsg string + +func overlappingIdentifierError(name string) errorMsg { + return errorMsg(fmt.Sprintf("overlapping identifier for name '%s'", name)) +} + +func overlappingMacroError(name string, argCount int) errorMsg { + return errorMsg(fmt.Sprintf( + "overlapping macro for name '%s' with %d args", name, argCount)) +} + +func formatError(errMsgs []errorMsg) error { + errStrs := make([]string, 0) + if len(errMsgs) > 0 { + for i := 0; i < len(errMsgs); i++ { + if errMsgs[i] != "" { + errStrs = append(errStrs, string(errMsgs[i])) + } + } + } + if len(errStrs) > 0 { + return fmt.Errorf("%s", strings.Join(errStrs, "\n")) + } + return nil +} diff --git a/vendor/github.com/google/cel-go/checker/errors.go b/vendor/github.com/google/cel-go/checker/errors.go new file mode 100644 index 00000000..8b3bf0b8 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/errors.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" +) + +// typeErrors is a specialization of Errors. 
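+// Each helper formats one category of type-checking failure with a consistent
+// message; for instance, undefinedField reports a message of the form
+// "undefined field 'foo'" (editor's note, not upstream text).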
+type typeErrors struct { + errs *common.Errors +} + +func (e *typeErrors) fieldTypeMismatch(id int64, l common.Location, name string, field, value *types.Type) { + e.errs.ReportErrorAtID(id, l, "expected type of field '%s' is '%s' but provided type is '%s'", + name, FormatCELType(field), FormatCELType(value)) +} + +func (e *typeErrors) incompatibleType(id int64, l common.Location, ex ast.Expr, prev, next *types.Type) { + e.errs.ReportErrorAtID(id, l, + "incompatible type already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) +} + +func (e *typeErrors) noMatchingOverload(id int64, l common.Location, name string, args []*types.Type, isInstance bool) { + signature := formatFunctionDeclType(nil, args, isInstance) + e.errs.ReportErrorAtID(id, l, "found no matching overload for '%s' applied to '%s'", name, signature) +} + +func (e *typeErrors) notAComprehensionRange(id int64, l common.Location, t *types.Type) { + e.errs.ReportErrorAtID(id, l, "expression of type '%s' cannot be range of a comprehension (must be list, map, or dynamic)", + FormatCELType(t)) +} + +func (e *typeErrors) notAnOptionalFieldSelection(id int64, l common.Location, field ast.Expr) { + e.errs.ReportErrorAtID(id, l, "unsupported optional field selection: %v", field) +} + +func (e *typeErrors) notAType(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "'%s' is not a type", typeName) +} + +func (e *typeErrors) notAMessageType(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "'%s' is not a message type", typeName) +} + +func (e *typeErrors) referenceRedefinition(id int64, l common.Location, ex ast.Expr, prev, next *ast.ReferenceInfo) { + e.errs.ReportErrorAtID(id, l, + "reference already exists for expression: %v(%d) old:%v, new:%v", ex, ex.ID(), prev, next) +} + +func (e *typeErrors) typeDoesNotSupportFieldSelection(id int64, l common.Location, t *types.Type) { + e.errs.ReportErrorAtID(id, l, "type '%s' does not support field selection", FormatCELType(t)) +} + +func (e *typeErrors) typeMismatch(id int64, l common.Location, expected, actual *types.Type) { + e.errs.ReportErrorAtID(id, l, "expected type '%s' but found '%s'", + FormatCELType(expected), FormatCELType(actual)) +} + +func (e *typeErrors) undefinedField(id int64, l common.Location, field string) { + e.errs.ReportErrorAtID(id, l, "undefined field '%s'", field) +} + +func (e *typeErrors) undeclaredReference(id int64, l common.Location, container string, name string) { + e.errs.ReportErrorAtID(id, l, "undeclared reference to '%s' (in container '%s')", name, container) +} + +func (e *typeErrors) unexpectedFailedResolution(id int64, l common.Location, typeName string) { + e.errs.ReportErrorAtID(id, l, "unexpected failed resolution of '%s'", typeName) +} + +func (e *typeErrors) unexpectedASTType(id int64, l common.Location, kind, typeName string) { + e.errs.ReportErrorAtID(id, l, "unexpected %s type: %v", kind, typeName) +} diff --git a/vendor/github.com/google/cel-go/checker/format.go b/vendor/github.com/google/cel-go/checker/format.go new file mode 100644 index 00000000..95842905 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/format.go @@ -0,0 +1,216 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "fmt" + "strings" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +const ( + kindUnknown = iota + 1 + kindError + kindFunction + kindDyn + kindPrimitive + kindWellKnown + kindWrapper + kindNull + kindAbstract + kindType + kindList + kindMap + kindObject + kindTypeParam +) + +// FormatCheckedType converts a type message into a string representation. +func FormatCheckedType(t *exprpb.Type) string { + switch kindOf(t) { + case kindDyn: + return "dyn" + case kindFunction: + return formatFunctionExprType(t.GetFunction().GetResultType(), + t.GetFunction().GetArgTypes(), + false) + case kindList: + return fmt.Sprintf("list(%s)", FormatCheckedType(t.GetListType().GetElemType())) + case kindObject: + return t.GetMessageType() + case kindMap: + return fmt.Sprintf("map(%s, %s)", + FormatCheckedType(t.GetMapType().GetKeyType()), + FormatCheckedType(t.GetMapType().GetValueType())) + case kindNull: + return "null" + case kindPrimitive: + switch t.GetPrimitive() { + case exprpb.Type_UINT64: + return "uint" + case exprpb.Type_INT64: + return "int" + } + return strings.Trim(strings.ToLower(t.GetPrimitive().String()), " ") + case kindType: + if t.GetType() == nil || t.GetType().GetTypeKind() == nil { + return "type" + } + return fmt.Sprintf("type(%s)", FormatCheckedType(t.GetType())) + case kindWellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return "any" + case exprpb.Type_DURATION: + return "duration" + case exprpb.Type_TIMESTAMP: + return "timestamp" + } + case kindWrapper: + return fmt.Sprintf("wrapper(%s)", + FormatCheckedType(chkdecls.NewPrimitiveType(t.GetWrapper()))) + case kindError: + return "!error!" + case kindTypeParam: + return t.GetTypeParam() + case kindAbstract: + at := t.GetAbstractType() + params := at.GetParameterTypes() + paramStrs := make([]string, len(params)) + for i, p := range params { + paramStrs[i] = FormatCheckedType(p) + } + return fmt.Sprintf("%s(%s)", at.GetName(), strings.Join(paramStrs, ", ")) + } + return t.String() +} + +type formatter func(any) string + +// FormatCELType formats a types.Type value to a string representation. +// +// The type formatting is identical to FormatCheckedType. +func FormatCELType(t any) string { + dt := t.(*types.Type) + switch dt.Kind() { + case types.AnyKind: + return "any" + case types.DurationKind: + return "duration" + case types.ErrorKind: + return "!error!" + case types.NullTypeKind: + return "null" + case types.TimestampKind: + return "timestamp" + case types.TypeParamKind: + return dt.TypeName() + case types.OpaqueKind: + if dt.TypeName() == "function" { + // There is no explicit function type in the new types representation, so information like + // whether the function is a member function is absent. 
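+			// Parameters()[0] holds the result type and the remaining
+			// parameters hold the argument types, so member functions are
+			// printed as if they were global (editor's clarification, not
+			// upstream text).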
+ return formatFunctionDeclType(dt.Parameters()[0], dt.Parameters()[1:], false) + } + case types.UnspecifiedKind: + return "" + } + if len(dt.Parameters()) == 0 { + return dt.DeclaredTypeName() + } + paramTypeNames := make([]string, 0, len(dt.Parameters())) + for _, p := range dt.Parameters() { + paramTypeNames = append(paramTypeNames, FormatCELType(p)) + } + return fmt.Sprintf("%s(%s)", dt.TypeName(), strings.Join(paramTypeNames, ", ")) +} + +func formatExprType(t any) string { + if t == nil { + return "" + } + return FormatCheckedType(t.(*exprpb.Type)) +} + +func formatFunctionExprType(resultType *exprpb.Type, argTypes []*exprpb.Type, isInstance bool) string { + return formatFunctionInternal[*exprpb.Type](resultType, argTypes, isInstance, formatExprType) +} + +func formatFunctionDeclType(resultType *types.Type, argTypes []*types.Type, isInstance bool) string { + return formatFunctionInternal[*types.Type](resultType, argTypes, isInstance, FormatCELType) +} + +func formatFunctionInternal[T any](resultType T, argTypes []T, isInstance bool, format formatter) string { + result := "" + if isInstance { + target := argTypes[0] + argTypes = argTypes[1:] + result += format(target) + result += "." + } + result += "(" + for i, arg := range argTypes { + if i > 0 { + result += ", " + } + result += format(arg) + } + result += ")" + rt := format(resultType) + if rt != "" { + result += " -> " + result += rt + } + return result +} + +// kindOf returns the kind of the type as defined in the checked.proto. +func kindOf(t *exprpb.Type) int { + if t == nil || t.TypeKind == nil { + return kindUnknown + } + switch t.GetTypeKind().(type) { + case *exprpb.Type_Error: + return kindError + case *exprpb.Type_Function: + return kindFunction + case *exprpb.Type_Dyn: + return kindDyn + case *exprpb.Type_Primitive: + return kindPrimitive + case *exprpb.Type_WellKnown: + return kindWellKnown + case *exprpb.Type_Wrapper: + return kindWrapper + case *exprpb.Type_Null: + return kindNull + case *exprpb.Type_Type: + return kindType + case *exprpb.Type_ListType_: + return kindList + case *exprpb.Type_MapType_: + return kindMap + case *exprpb.Type_MessageType: + return kindObject + case *exprpb.Type_TypeParam: + return kindTypeParam + case *exprpb.Type_AbstractType_: + return kindAbstract + } + return kindUnknown +} diff --git a/vendor/github.com/google/cel-go/checker/mapping.go b/vendor/github.com/google/cel-go/checker/mapping.go new file mode 100644 index 00000000..8163a908 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/mapping.go @@ -0,0 +1,49 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package checker
+
+import (
+	"github.com/google/cel-go/common/types"
+)
+
+type mapping struct {
+	mapping map[string]*types.Type
+}
+
+func newMapping() *mapping {
+	return &mapping{
+		mapping: make(map[string]*types.Type),
+	}
+}
+
+func (m *mapping) add(from, to *types.Type) {
+	m.mapping[FormatCELType(from)] = to
+}
+
+func (m *mapping) find(from *types.Type) (*types.Type, bool) {
+	if r, found := m.mapping[FormatCELType(from)]; found {
+		return r, found
+	}
+	return nil, false
+}
+
+func (m *mapping) copy() *mapping {
+	c := newMapping()
+
+	for k, v := range m.mapping {
+		c.mapping[k] = v
+	}
+	return c
+}
diff --git a/vendor/github.com/google/cel-go/checker/options.go b/vendor/github.com/google/cel-go/checker/options.go
new file mode 100644
index 00000000..0560c381
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/options.go
@@ -0,0 +1,42 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+type options struct {
+	crossTypeNumericComparisons  bool
+	homogeneousAggregateLiterals bool
+	validatedDeclarations        *Scopes
+}
+
+// Option is a functional option for configuring the type-checker.
+type Option func(*options) error
+
+// CrossTypeNumericComparisons toggles type-checker support for numeric comparisons across types.
+// See https://github.com/google/cel-spec/wiki/proposal-210 for more details.
+func CrossTypeNumericComparisons(enabled bool) Option {
+	return func(opts *options) error {
+		opts.crossTypeNumericComparisons = enabled
+		return nil
+	}
+}
+
+// ValidatedDeclarations provides a reference to validated declarations which will be copied
+// into new checker instances.
+func ValidatedDeclarations(env *Env) Option {
+	return func(opts *options) error {
+		opts.validatedDeclarations = env.validatedDeclarations()
+		return nil
+	}
+}
diff --git a/vendor/github.com/google/cel-go/checker/printer.go b/vendor/github.com/google/cel-go/checker/printer.go
new file mode 100644
index 00000000..7a3984f0
--- /dev/null
+++ b/vendor/github.com/google/cel-go/checker/printer.go
@@ -0,0 +1,74 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package checker + +import ( + "sort" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/debug" +) + +type semanticAdorner struct { + checked *ast.AST +} + +var _ debug.Adorner = &semanticAdorner{} + +func (a *semanticAdorner) GetMetadata(elem any) string { + result := "" + e, isExpr := elem.(ast.Expr) + if !isExpr { + return result + } + t := a.checked.TypeMap()[e.ID()] + if t != nil { + result += "~" + result += FormatCELType(t) + } + + switch e.Kind() { + case ast.IdentKind, + ast.CallKind, + ast.ListKind, + ast.StructKind, + ast.SelectKind: + if ref, found := a.checked.ReferenceMap()[e.ID()]; found { + if len(ref.OverloadIDs) == 0 { + result += "^" + ref.Name + } else { + sort.Strings(ref.OverloadIDs) + for i, overload := range ref.OverloadIDs { + if i == 0 { + result += "^" + } else { + result += "|" + } + result += overload + } + } + } + } + + return result +} + +// Print returns a string representation of the Expr message, +// annotated with types from the CheckedExpr. The Expr must +// be a sub-expression embedded in the CheckedExpr. +func Print(e ast.Expr, checked *ast.AST) string { + a := &semanticAdorner{checked: checked} + return debug.ToAdornedDebugString(e, a) +} diff --git a/vendor/github.com/google/cel-go/checker/scopes.go b/vendor/github.com/google/cel-go/checker/scopes.go new file mode 100644 index 00000000..8bb73ddb --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/scopes.go @@ -0,0 +1,147 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package checker + +import ( + "github.com/google/cel-go/common/decls" +) + +// Scopes represents nested Decl sets where the Scopes value contains a Groups containing all +// identifiers in scope and an optional parent representing outer scopes. +// Each Groups value is a mapping of names to Decls in the ident and function namespaces. +// Lookups are performed such that bindings in inner scopes shadow those in outer scopes. +type Scopes struct { + parent *Scopes + scopes *Group +} + +// newScopes creates a new, empty Scopes. +// Some operations can't be safely performed until a Group is added with Push. +func newScopes() *Scopes { + return &Scopes{ + scopes: newGroup(), + } +} + +// Copy creates a copy of the current Scopes values, including a copy of its parent if non-nil. +func (s *Scopes) Copy() *Scopes { + cpy := newScopes() + if s == nil { + return cpy + } + if s.parent != nil { + cpy.parent = s.parent.Copy() + } + cpy.scopes = s.scopes.copy() + return cpy +} + +// Push creates a new Scopes value which references the current Scope as its parent. +func (s *Scopes) Push() *Scopes { + return &Scopes{ + parent: s, + scopes: newGroup(), + } +} + +// Pop returns the parent Scopes value for the current scope, or the current scope if the parent +// is nil. +func (s *Scopes) Pop() *Scopes { + if s.parent != nil { + return s.parent + } + // TODO: Consider whether this should be an error / panic. 
+ return s +} + +// AddIdent adds the ident Decl in the current scope. +// Note: If the name collides with an existing identifier in the scope, the Decl is overwritten. +func (s *Scopes) AddIdent(decl *decls.VariableDecl) { + s.scopes.idents[decl.Name()] = decl +} + +// FindIdent finds the first ident Decl with a matching name in Scopes, or nil if one cannot be +// found. +// Note: The search is performed from innermost to outermost. +func (s *Scopes) FindIdent(name string) *decls.VariableDecl { + if ident, found := s.scopes.idents[name]; found { + return ident + } + if s.parent != nil { + return s.parent.FindIdent(name) + } + return nil +} + +// FindIdentInScope finds the first ident Decl with a matching name in the current Scopes value, or +// nil if one does not exist. +// Note: The search is only performed on the current scope and does not search outer scopes. +func (s *Scopes) FindIdentInScope(name string) *decls.VariableDecl { + if ident, found := s.scopes.idents[name]; found { + return ident + } + return nil +} + +// SetFunction adds the function Decl to the current scope. +// Note: Any previous entry for a function in the current scope with the same name is overwritten. +func (s *Scopes) SetFunction(fn *decls.FunctionDecl) { + s.scopes.functions[fn.Name()] = fn +} + +// FindFunction finds the first function Decl with a matching name in Scopes. +// The search is performed from innermost to outermost. +// Returns nil if no such function in Scopes. +func (s *Scopes) FindFunction(name string) *decls.FunctionDecl { + if fn, found := s.scopes.functions[name]; found { + return fn + } + if s.parent != nil { + return s.parent.FindFunction(name) + } + return nil +} + +// Group is a set of Decls that is pushed on or popped off a Scopes as a unit. +// Contains separate namespaces for identifier and function Decls. +// (Should be named "Scope" perhaps?) +type Group struct { + idents map[string]*decls.VariableDecl + functions map[string]*decls.FunctionDecl +} + +// copy creates a new Group instance with a shallow copy of the variables and functions. +// If callers need to mutate the exprpb.Decl definitions for a Function, they should copy-on-write. +func (g *Group) copy() *Group { + cpy := &Group{ + idents: make(map[string]*decls.VariableDecl, len(g.idents)), + functions: make(map[string]*decls.FunctionDecl, len(g.functions)), + } + for n, id := range g.idents { + cpy.idents[n] = id + } + for n, fn := range g.functions { + cpy.functions[n] = fn + } + return cpy +} + +// newGroup creates a new Group with empty maps for identifiers and functions. +func newGroup() *Group { + return &Group{ + idents: make(map[string]*decls.VariableDecl), + functions: make(map[string]*decls.FunctionDecl), + } +} diff --git a/vendor/github.com/google/cel-go/checker/types.go b/vendor/github.com/google/cel-go/checker/types.go new file mode 100644 index 00000000..4c65b273 --- /dev/null +++ b/vendor/github.com/google/cel-go/checker/types.go @@ -0,0 +1,314 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package checker
+
+import (
+	"github.com/google/cel-go/common/types"
+)
+
+// isDyn returns true if the input t is either type DYN or a well-known ANY message.
+func isDyn(t *types.Type) bool {
+	// Note: object type values that are well-known and map to a DYN value in practice
+	// are sanitized prior to being added to the environment.
+	switch t.Kind() {
+	case types.DynKind, types.AnyKind:
+		return true
+	default:
+		return false
+	}
+}
+
+// isDynOrError returns true if the input is either an Error, DYN, or well-known ANY message.
+func isDynOrError(t *types.Type) bool {
+	return isError(t) || isDyn(t)
+}
+
+func isError(t *types.Type) bool {
+	return t.Kind() == types.ErrorKind
+}
+
+func isOptional(t *types.Type) bool {
+	if t.Kind() == types.OpaqueKind {
+		return t.TypeName() == "optional_type"
+	}
+	return false
+}
+
+func maybeUnwrapOptional(t *types.Type) (*types.Type, bool) {
+	if isOptional(t) {
+		return t.Parameters()[0], true
+	}
+	return t, false
+}
+
+// isEqualOrLessSpecific checks whether one type is equal or less specific than the other one.
+// A type is less specific if it matches the other type using the DYN type.
+func isEqualOrLessSpecific(t1, t2 *types.Type) bool {
+	kind1, kind2 := t1.Kind(), t2.Kind()
+	// The first type is less specific.
+	if isDyn(t1) || kind1 == types.TypeParamKind {
+		return true
+	}
+	// The first type is not less specific.
+	if isDyn(t2) || kind2 == types.TypeParamKind {
+		return false
+	}
+	// Types must be of the same kind to be equal.
+	if kind1 != kind2 {
+		return false
+	}
+
+	// With limited exceptions for ANY and JSON values, the types must agree and be equivalent in
+	// order to return true.
+	switch kind1 {
+	case types.OpaqueKind:
+		if t1.TypeName() != t2.TypeName() ||
+			len(t1.Parameters()) != len(t2.Parameters()) {
+			return false
+		}
+		for i, p1 := range t1.Parameters() {
+			if !isEqualOrLessSpecific(p1, t2.Parameters()[i]) {
+				return false
+			}
+		}
+		return true
+	case types.ListKind:
+		return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0])
+	case types.MapKind:
+		return isEqualOrLessSpecific(t1.Parameters()[0], t2.Parameters()[0]) &&
+			isEqualOrLessSpecific(t1.Parameters()[1], t2.Parameters()[1])
+	case types.TypeKind:
+		return true
+	default:
+		return t1.IsExactType(t2)
+	}
+}
+
+// internalIsAssignable returns true if t1 is assignable to t2.
+func internalIsAssignable(m *mapping, t1, t2 *types.Type) bool {
+	// Process type parameters.
+	kind1, kind2 := t1.Kind(), t2.Kind()
+	if kind2 == types.TypeParamKind {
+		// If t2 is a valid type substitution for t1, return true.
+		valid, t2HasSub := isValidTypeSubstitution(m, t1, t2)
+		if valid {
+			return true
+		}
+		// If t2 is not a valid type sub for t1, and already has a known substitution return false
+		// since it is not possible for t1 to be a substitution for t2.
+		if !valid && t2HasSub {
+			return false
+		}
+		// Otherwise, fall through to check whether t1 is a possible substitution for t2.
+	}
+	if kind1 == types.TypeParamKind {
+		// Return whether t1 is a valid substitution for t2. If not, do no additional checks as the
+		// possible type substitutions have been searched in both directions.
+		valid, _ := isValidTypeSubstitution(m, t2, t1)
+		return valid
+	}
+
+	// Next check for wildcard types.
+	if isDynOrError(t1) || isDynOrError(t2) {
+		return true
+	}
+	// Preserve the nullness checks of the legacy type-checker.
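+	// The legacy rules treat struct, any, duration, timestamp, and opaque types as
+	// nullable, in addition to any type assignable from types.NullType.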
+	if kind1 == types.NullTypeKind {
+		return internalIsAssignableNull(t2)
+	}
+	if kind2 == types.NullTypeKind {
+		return internalIsAssignableNull(t1)
+	}
+
+	// Test for when the types do not need to agree, but are more specific than dyn.
+	switch kind1 {
+	case types.BoolKind, types.BytesKind, types.DoubleKind, types.IntKind, types.StringKind, types.UintKind,
+		types.AnyKind, types.DurationKind, types.TimestampKind,
+		types.StructKind:
+		// Test whether t2 is assignable from t1. The order of this check won't usually matter;
+		// however, there may be cases where type capabilities are expanded beyond what is supported
+		// in the current common/types package. For example, an interface designation for a group of
+		// Struct types.
+		return t2.IsAssignableType(t1)
+	case types.TypeKind:
+		return kind2 == types.TypeKind
+	case types.OpaqueKind, types.ListKind, types.MapKind:
+		return t1.Kind() == t2.Kind() && t1.TypeName() == t2.TypeName() &&
+			internalIsAssignableList(m, t1.Parameters(), t2.Parameters())
+	default:
+		return false
+	}
+}
+
+// isValidTypeSubstitution returns whether t2 (or its type substitution) is a valid type
+// substitution for t1, and whether t2 has a type substitution in mapping m.
+//
+// The type t2 is a valid substitution for t1 if any of the following statements is true:
+// - t2 has a type substitution (t2sub) equal to t1
+// - t2 has a type substitution (t2sub) assignable to t1
+// - t2 does not occur within t1.
+func isValidTypeSubstitution(m *mapping, t1, t2 *types.Type) (valid, hasSub bool) {
+	// Early return if t1 and t2 are the same instance.
+	kind1, kind2 := t1.Kind(), t2.Kind()
+	if kind1 == kind2 && t1.IsExactType(t2) {
+		return true, true
+	}
+	if t2Sub, found := m.find(t2); found {
+		// Early return if t1 and t2Sub are the same instance as otherwise the mapping
+		// might mark a type as being a substitution for itself.
+		if kind1 == t2Sub.Kind() && t1.IsExactType(t2Sub) {
+			return true, true
+		}
+		// If the types are compatible, pick the more general type and return true.
+		if internalIsAssignable(m, t1, t2Sub) {
+			t2New := mostGeneral(t1, t2Sub)
+			// Only update the type reference map if the target type does not occur within it.
+			if notReferencedIn(m, t2, t2New) {
+				m.add(t2, t2New)
+			}
+			// Acknowledge the type agreement, and that the substitution is already tracked.
+			return true, true
+		}
+		return false, true
+	}
+	if notReferencedIn(m, t2, t1) {
+		m.add(t2, t1)
+		return true, false
+	}
+	return false, false
+}
+
+// internalIsAssignableList returns true if the element types at each index in the list are
+// assignable from l1[i] to l2[i]. The list lengths must also agree for the lists to be
+// assignable.
+func internalIsAssignableList(m *mapping, l1, l2 []*types.Type) bool {
+	if len(l1) != len(l2) {
+		return false
+	}
+	for i, t1 := range l1 {
+		if !internalIsAssignable(m, t1, l2[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// internalIsAssignableNull returns true if the type is nullable.
+func internalIsAssignableNull(t *types.Type) bool {
+	return isLegacyNullable(t) || t.IsAssignableType(types.NullType)
+}
+
+// isLegacyNullable preserves the null-ness compatibility of the original type-checker implementation.
+func isLegacyNullable(t *types.Type) bool {
+	switch t.Kind() {
+	case types.OpaqueKind, types.StructKind, types.AnyKind, types.DurationKind, types.TimestampKind:
+		return true
+	}
+	return false
+}
+
+// isAssignable returns an updated type substitution mapping if t1 is assignable to t2.
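+// A nil result indicates the assignment is not possible. The input mapping is
+// never mutated since the check is performed against a copy.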
+func isAssignable(m *mapping, t1, t2 *types.Type) *mapping { + mCopy := m.copy() + if internalIsAssignable(mCopy, t1, t2) { + return mCopy + } + return nil +} + +// isAssignableList returns an updated type substitution mapping if l1 is assignable to l2. +func isAssignableList(m *mapping, l1, l2 []*types.Type) *mapping { + mCopy := m.copy() + if internalIsAssignableList(mCopy, l1, l2) { + return mCopy + } + return nil +} + +// mostGeneral returns the more general of two types which are known to unify. +func mostGeneral(t1, t2 *types.Type) *types.Type { + if isEqualOrLessSpecific(t1, t2) { + return t1 + } + return t2 +} + +// notReferencedIn checks whether the type doesn't appear directly or transitively within the other +// type. This is a standard requirement for type unification, commonly referred to as the "occurs +// check". +func notReferencedIn(m *mapping, t, withinType *types.Type) bool { + if t.IsExactType(withinType) { + return false + } + withinKind := withinType.Kind() + switch withinKind { + case types.TypeParamKind: + wtSub, found := m.find(withinType) + if !found { + return true + } + return notReferencedIn(m, t, wtSub) + case types.OpaqueKind, types.ListKind, types.MapKind, types.TypeKind: + for _, pt := range withinType.Parameters() { + if !notReferencedIn(m, t, pt) { + return false + } + } + return true + default: + return true + } +} + +// substitute replaces all direct and indirect occurrences of bound type parameters. Unbound type +// parameters are replaced by DYN if typeParamToDyn is true. +func substitute(m *mapping, t *types.Type, typeParamToDyn bool) *types.Type { + if tSub, found := m.find(t); found { + return substitute(m, tSub, typeParamToDyn) + } + kind := t.Kind() + if typeParamToDyn && kind == types.TypeParamKind { + return types.DynType + } + switch kind { + case types.OpaqueKind: + return types.NewOpaqueType(t.TypeName(), substituteParams(m, t.Parameters(), typeParamToDyn)...) + case types.ListKind: + return types.NewListType(substitute(m, t.Parameters()[0], typeParamToDyn)) + case types.MapKind: + return types.NewMapType(substitute(m, t.Parameters()[0], typeParamToDyn), + substitute(m, t.Parameters()[1], typeParamToDyn)) + case types.TypeKind: + if len(t.Parameters()) > 0 { + tParam := t.Parameters()[0] + return types.NewTypeTypeWithParam(substitute(m, tParam, typeParamToDyn)) + } + return t + default: + return t + } +} + +func substituteParams(m *mapping, typeParams []*types.Type, typeParamToDyn bool) []*types.Type { + subParams := make([]*types.Type, len(typeParams)) + for i, tp := range typeParams { + subParams[i] = substitute(m, tp, typeParamToDyn) + } + return subParams +} + +func newFunctionType(resultType *types.Type, argTypes ...*types.Type) *types.Type { + return types.NewOpaqueType("function", append([]*types.Type{resultType}, argTypes...)...) 
+} diff --git a/vendor/github.com/google/cel-go/common/BUILD.bazel b/vendor/github.com/google/cel-go/common/BUILD.bazel new file mode 100644 index 00000000..eef7f281 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/BUILD.bazel @@ -0,0 +1,34 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "cost.go", + "error.go", + "errors.go", + "location.go", + "source.go", + ], + importpath = "github.com/google/cel-go/common", + deps = [ + "//common/runes:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "errors_test.go", + "source_test.go", + ], + embed = [ + ":go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/ast/BUILD.bazel b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel new file mode 100644 index 00000000..5c40c378 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/BUILD.bazel @@ -0,0 +1,54 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "ast.go", + "conversion.go", + "expr.go", + "factory.go", + "navigable.go", + ], + importpath = "github.com/google/cel-go/common/ast", + deps = [ + "//common:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "ast_test.go", + "conversion_test.go", + "expr_test.go", + "navigable_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//checker:go_default_library", + "//checker/decls:go_default_library", + "//common:go_default_library", + "//common/containers:go_default_library", + "//common/decls:go_default_library", + "//common/overloads:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//parser:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//encoding/prototext:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/ast/ast.go b/vendor/github.com/google/cel-go/common/ast/ast.go new file mode 100644 index 00000000..b807669d --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/ast.go @@ -0,0 +1,457 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Package ast declares data structures useful for parsed and checked abstract syntax trees.
+package ast
+
+import (
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// AST contains a CEL expression and source info along with CEL-native type and reference information.
+type AST struct {
+	expr       Expr
+	sourceInfo *SourceInfo
+	typeMap    map[int64]*types.Type
+	refMap     map[int64]*ReferenceInfo
+}
+
+// Expr returns the root ast.Expr value in the AST.
+func (a *AST) Expr() Expr {
+	if a == nil {
+		return nilExpr
+	}
+	return a.expr
+}
+
+// SourceInfo returns the source metadata associated with the parse / type-check passes.
+func (a *AST) SourceInfo() *SourceInfo {
+	if a == nil {
+		return nil
+	}
+	return a.sourceInfo
+}
+
+// GetType returns the type for the expression at the given id, if one exists, else types.DynType.
+func (a *AST) GetType(id int64) *types.Type {
+	if t, found := a.TypeMap()[id]; found {
+		return t
+	}
+	return types.DynType
+}
+
+// SetType sets the type of the expression node at the given id.
+func (a *AST) SetType(id int64, t *types.Type) {
+	if a == nil {
+		return
+	}
+	a.typeMap[id] = t
+}
+
+// TypeMap returns the map of expression ids to type-checked types.
+//
+// If the AST is not type-checked, the map will be empty.
+func (a *AST) TypeMap() map[int64]*types.Type {
+	if a == nil {
+		return map[int64]*types.Type{}
+	}
+	return a.typeMap
+}
+
+// GetOverloadIDs returns the set of overload function names for a given expression id.
+//
+// If the expression id is not a function call, or the AST is not type-checked, the result will be empty.
+func (a *AST) GetOverloadIDs(id int64) []string {
+	if ref, found := a.ReferenceMap()[id]; found {
+		return ref.OverloadIDs
+	}
+	return []string{}
+}
+
+// ReferenceMap returns the map of expression id to identifier, constant, and function references.
+func (a *AST) ReferenceMap() map[int64]*ReferenceInfo {
+	if a == nil {
+		return map[int64]*ReferenceInfo{}
+	}
+	return a.refMap
+}
+
+// SetReference adds a reference to the checked AST reference map.
+func (a *AST) SetReference(id int64, r *ReferenceInfo) {
+	if a == nil {
+		return
+	}
+	a.refMap[id] = r
+}
+
+// IsChecked returns whether the AST is type-checked.
+func (a *AST) IsChecked() bool {
+	return a != nil && len(a.TypeMap()) > 0
+}
+
+// NewAST creates a base AST instance with an ast.Expr and ast.SourceInfo value.
+func NewAST(e Expr, sourceInfo *SourceInfo) *AST {
+	if e == nil {
+		e = nilExpr
+	}
+	return &AST{
+		expr:       e,
+		sourceInfo: sourceInfo,
+		typeMap:    make(map[int64]*types.Type),
+		refMap:     make(map[int64]*ReferenceInfo),
+	}
+}
+
+// NewCheckedAST wraps a parsed AST and augments it with type and reference metadata.
+func NewCheckedAST(parsed *AST, typeMap map[int64]*types.Type, refMap map[int64]*ReferenceInfo) *AST {
+	return &AST{
+		expr:       parsed.Expr(),
+		sourceInfo: parsed.SourceInfo(),
+		typeMap:    typeMap,
+		refMap:     refMap,
+	}
+}
+
+// Copy creates a deep copy of the Expr and SourceInfo values in the input AST.
+//
+// Copies of the Expr value are generated using an internal default ExprFactory.
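+//
+// The type and reference maps are copied entry by entry; the *types.Type and
+// *ReferenceInfo values themselves are shared rather than cloned.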
+func Copy(a *AST) *AST { + if a == nil { + return nil + } + e := defaultFactory.CopyExpr(a.expr) + if !a.IsChecked() { + return NewAST(e, CopySourceInfo(a.SourceInfo())) + } + typesCopy := make(map[int64]*types.Type, len(a.typeMap)) + for id, t := range a.typeMap { + typesCopy[id] = t + } + refsCopy := make(map[int64]*ReferenceInfo, len(a.refMap)) + for id, r := range a.refMap { + refsCopy[id] = r + } + return NewCheckedAST(NewAST(e, CopySourceInfo(a.SourceInfo())), typesCopy, refsCopy) +} + +// MaxID returns the upper-bound, non-inclusive, of ids present within the AST's Expr value. +func MaxID(a *AST) int64 { + visitor := &maxIDVisitor{maxID: 1} + PostOrderVisit(a.Expr(), visitor) + for id, call := range a.SourceInfo().MacroCalls() { + PostOrderVisit(call, visitor) + if id > visitor.maxID { + visitor.maxID = id + 1 + } + } + return visitor.maxID + 1 +} + +// NewSourceInfo creates a simple SourceInfo object from an input common.Source value. +func NewSourceInfo(src common.Source) *SourceInfo { + var lineOffsets []int32 + var desc string + baseLine := int32(0) + baseCol := int32(0) + if src != nil { + desc = src.Description() + lineOffsets = src.LineOffsets() + // Determine whether the source metadata should be computed relative + // to a base line and column value. This can be determined by requesting + // the location for offset 0 from the source object. + if loc, found := src.OffsetLocation(0); found { + baseLine = int32(loc.Line()) - 1 + baseCol = int32(loc.Column()) + } + } + return &SourceInfo{ + desc: desc, + lines: lineOffsets, + baseLine: baseLine, + baseCol: baseCol, + offsetRanges: make(map[int64]OffsetRange), + macroCalls: make(map[int64]Expr), + } +} + +// CopySourceInfo creates a deep copy of the MacroCalls within the input SourceInfo. +// +// Copies of macro Expr values are generated using an internal default ExprFactory. +func CopySourceInfo(info *SourceInfo) *SourceInfo { + if info == nil { + return nil + } + rangesCopy := make(map[int64]OffsetRange, len(info.offsetRanges)) + for id, off := range info.offsetRanges { + rangesCopy[id] = off + } + callsCopy := make(map[int64]Expr, len(info.macroCalls)) + for id, call := range info.macroCalls { + callsCopy[id] = defaultFactory.CopyExpr(call) + } + return &SourceInfo{ + syntax: info.syntax, + desc: info.desc, + lines: info.lines, + baseLine: info.baseLine, + baseCol: info.baseCol, + offsetRanges: rangesCopy, + macroCalls: callsCopy, + } +} + +// SourceInfo records basic information about the expression as a textual input and +// as a parsed expression value. +type SourceInfo struct { + syntax string + desc string + lines []int32 + baseLine int32 + baseCol int32 + offsetRanges map[int64]OffsetRange + macroCalls map[int64]Expr +} + +// SyntaxVersion returns the syntax version associated with the text expression. +func (s *SourceInfo) SyntaxVersion() string { + if s == nil { + return "" + } + return s.syntax +} + +// Description provides information about where the expression came from. +func (s *SourceInfo) Description() string { + if s == nil { + return "" + } + return s.desc +} + +// LineOffsets returns a list of the 0-based character offsets in the input text where newlines appear. +func (s *SourceInfo) LineOffsets() []int32 { + if s == nil { + return []int32{} + } + return s.lines +} + +// MacroCalls returns a map of expression id to ast.Expr value where the id represents the expression +// node where the macro was inserted into the AST, and the ast.Expr value represents the original call +// signature which was replaced. 
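+//
+// For example, with macro call tracking enabled, `has(a.b)` parses to a test-only
+// select expression while the original `has(a.b)` call is preserved in this map.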
+func (s *SourceInfo) MacroCalls() map[int64]Expr { + if s == nil { + return map[int64]Expr{} + } + return s.macroCalls +} + +// GetMacroCall returns the original ast.Expr value for the given expression if it was generated via +// a macro replacement. +// +// Note, parsing options must be enabled to track macro calls before this method will return a value. +func (s *SourceInfo) GetMacroCall(id int64) (Expr, bool) { + e, found := s.MacroCalls()[id] + return e, found +} + +// SetMacroCall records a macro call at a specific location. +func (s *SourceInfo) SetMacroCall(id int64, e Expr) { + if s != nil { + s.macroCalls[id] = e + } +} + +// ClearMacroCall removes the macro call at the given expression id. +func (s *SourceInfo) ClearMacroCall(id int64) { + if s != nil { + delete(s.macroCalls, id) + } +} + +// OffsetRanges returns a map of expression id to OffsetRange values where the range indicates either: +// the start and end position in the input stream where the expression occurs, or the start position +// only. If the range only captures start position, the stop position of the range will be equal to +// the start. +func (s *SourceInfo) OffsetRanges() map[int64]OffsetRange { + if s == nil { + return map[int64]OffsetRange{} + } + return s.offsetRanges +} + +// GetOffsetRange retrieves an OffsetRange for the given expression id if one exists. +func (s *SourceInfo) GetOffsetRange(id int64) (OffsetRange, bool) { + if s == nil { + return OffsetRange{}, false + } + o, found := s.offsetRanges[id] + return o, found +} + +// SetOffsetRange sets the OffsetRange for the given expression id. +func (s *SourceInfo) SetOffsetRange(id int64, o OffsetRange) { + if s == nil { + return + } + s.offsetRanges[id] = o +} + +// ClearOffsetRange removes the OffsetRange for the given expression id. +func (s *SourceInfo) ClearOffsetRange(id int64) { + if s != nil { + delete(s.offsetRanges, id) + } +} + +// GetStartLocation calculates the human-readable 1-based line and 0-based column of the first character +// of the expression node at the id. +func (s *SourceInfo) GetStartLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Start) + } + return common.NoLocation +} + +// GetStopLocation calculates the human-readable 1-based line and 0-based column of the last character for +// the expression node at the given id. +// +// If the SourceInfo was generated from a serialized protobuf representation, the stop location will +// be identical to the start location for the expression. +func (s *SourceInfo) GetStopLocation(id int64) common.Location { + if o, found := s.GetOffsetRange(id); found { + return s.GetLocationByOffset(o.Stop) + } + return common.NoLocation +} + +// GetLocationByOffset returns the line and column information for a given character offset. +func (s *SourceInfo) GetLocationByOffset(offset int32) common.Location { + line := 1 + col := int(offset) + for _, lineOffset := range s.LineOffsets() { + if lineOffset > offset { + break + } + line++ + col = int(offset - lineOffset) + } + return common.NewLocation(line, col) +} + +// ComputeOffset calculates the 0-based character offset from a 1-based line and 0-based column. 
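+//
+// A negative result indicates the requested line is out of range for the source.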
+func (s *SourceInfo) ComputeOffset(line, col int32) int32 { + if s != nil { + line = s.baseLine + line + col = s.baseCol + col + } + if line == 1 { + return col + } + if line < 1 || line > int32(len(s.LineOffsets())) { + return -1 + } + offset := s.LineOffsets()[line-2] + return offset + col +} + +// OffsetRange captures the start and stop positions of a section of text in the input expression. +type OffsetRange struct { + Start int32 + Stop int32 +} + +// ReferenceInfo contains a CEL native representation of an identifier reference which may refer to +// either a qualified identifier name, a set of overload ids, or a constant value from an enum. +type ReferenceInfo struct { + Name string + OverloadIDs []string + Value ref.Val +} + +// NewIdentReference creates a ReferenceInfo instance for an identifier with an optional constant value. +func NewIdentReference(name string, value ref.Val) *ReferenceInfo { + return &ReferenceInfo{Name: name, Value: value} +} + +// NewFunctionReference creates a ReferenceInfo instance for a set of function overloads. +func NewFunctionReference(overloads ...string) *ReferenceInfo { + info := &ReferenceInfo{} + for _, id := range overloads { + info.AddOverload(id) + } + return info +} + +// AddOverload appends a function overload ID to the ReferenceInfo. +func (r *ReferenceInfo) AddOverload(overloadID string) { + for _, id := range r.OverloadIDs { + if id == overloadID { + return + } + } + r.OverloadIDs = append(r.OverloadIDs, overloadID) +} + +// Equals returns whether two references are identical to each other. +func (r *ReferenceInfo) Equals(other *ReferenceInfo) bool { + if r.Name != other.Name { + return false + } + if len(r.OverloadIDs) != len(other.OverloadIDs) { + return false + } + if len(r.OverloadIDs) != 0 { + overloadMap := make(map[string]struct{}, len(r.OverloadIDs)) + for _, id := range r.OverloadIDs { + overloadMap[id] = struct{}{} + } + for _, id := range other.OverloadIDs { + _, found := overloadMap[id] + if !found { + return false + } + } + } + if r.Value == nil && other.Value == nil { + return true + } + if r.Value == nil && other.Value != nil || + r.Value != nil && other.Value == nil || + r.Value.Equal(other.Value) != types.True { + return false + } + return true +} + +type maxIDVisitor struct { + maxID int64 + *baseVisitor +} + +// VisitExpr updates the max identifier if the incoming expression id is greater than previously observed. +func (v *maxIDVisitor) VisitExpr(e Expr) { + if v.maxID < e.ID() { + v.maxID = e.ID() + } +} + +// VisitEntryExpr updates the max identifier if the incoming entry id is greater than previously observed. +func (v *maxIDVisitor) VisitEntryExpr(e EntryExpr) { + if v.maxID < e.ID() { + v.maxID = e.ID() + } +} diff --git a/vendor/github.com/google/cel-go/common/ast/conversion.go b/vendor/github.com/google/cel-go/common/ast/conversion.go new file mode 100644 index 00000000..8f2c4bd1 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/conversion.go @@ -0,0 +1,632 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ast
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	structpb "google.golang.org/protobuf/types/known/structpb"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// ToProto converts an AST to a CheckedExpr protobuf.
+func ToProto(ast *AST) (*exprpb.CheckedExpr, error) {
+	refMap := make(map[int64]*exprpb.Reference, len(ast.ReferenceMap()))
+	for id, ref := range ast.ReferenceMap() {
+		r, err := ReferenceInfoToProto(ref)
+		if err != nil {
+			return nil, err
+		}
+		refMap[id] = r
+	}
+	typeMap := make(map[int64]*exprpb.Type, len(ast.TypeMap()))
+	for id, typ := range ast.TypeMap() {
+		t, err := types.TypeToExprType(typ)
+		if err != nil {
+			return nil, err
+		}
+		typeMap[id] = t
+	}
+	e, err := ExprToProto(ast.Expr())
+	if err != nil {
+		return nil, err
+	}
+	info, err := SourceInfoToProto(ast.SourceInfo())
+	if err != nil {
+		return nil, err
+	}
+	return &exprpb.CheckedExpr{
+		Expr:         e,
+		SourceInfo:   info,
+		ReferenceMap: refMap,
+		TypeMap:      typeMap,
+	}, nil
+}
+
+// ToAST converts a CheckedExpr protobuf to an AST instance.
+func ToAST(checked *exprpb.CheckedExpr) (*AST, error) {
+	refMap := make(map[int64]*ReferenceInfo, len(checked.GetReferenceMap()))
+	for id, ref := range checked.GetReferenceMap() {
+		r, err := ProtoToReferenceInfo(ref)
+		if err != nil {
+			return nil, err
+		}
+		refMap[id] = r
+	}
+	typeMap := make(map[int64]*types.Type, len(checked.GetTypeMap()))
+	for id, typ := range checked.GetTypeMap() {
+		t, err := types.ExprTypeToType(typ)
+		if err != nil {
+			return nil, err
+		}
+		typeMap[id] = t
+	}
+	info, err := ProtoToSourceInfo(checked.GetSourceInfo())
+	if err != nil {
+		return nil, err
+	}
+	root, err := ProtoToExpr(checked.GetExpr())
+	if err != nil {
+		return nil, err
+	}
+	ast := NewCheckedAST(NewAST(root, info), typeMap, refMap)
+	return ast, nil
+}
+
+// ProtoToExpr converts a protobuf Expr value to an ast.Expr value.
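+//
+// Each call allocates a fresh ExprFactory, so converted expressions do not share
+// id-generation state. A minimal sketch, assuming pb is an *exprpb.Expr value:
+//
+//	e, err := ProtoToExpr(pb)
+//	if err != nil {
+//		// handle the conversion failure
+//	}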
+func ProtoToExpr(e *exprpb.Expr) (Expr, error) { + factory := NewExprFactory() + return exprInternal(factory, e) +} + +// ProtoToEntryExpr converts a protobuf struct/map entry to an ast.EntryExpr +func ProtoToEntryExpr(e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + factory := NewExprFactory() + switch e.GetKeyKind().(type) { + case *exprpb.Expr_CreateStruct_Entry_FieldKey: + return exprStructField(factory, e.GetId(), e) + case *exprpb.Expr_CreateStruct_Entry_MapKey: + return exprMapEntry(factory, e.GetId(), e) + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func exprInternal(factory ExprFactory, e *exprpb.Expr) (Expr, error) { + id := e.GetId() + switch e.GetExprKind().(type) { + case *exprpb.Expr_CallExpr: + return exprCall(factory, id, e.GetCallExpr()) + case *exprpb.Expr_ComprehensionExpr: + return exprComprehension(factory, id, e.GetComprehensionExpr()) + case *exprpb.Expr_ConstExpr: + return exprLiteral(factory, id, e.GetConstExpr()) + case *exprpb.Expr_IdentExpr: + return exprIdent(factory, id, e.GetIdentExpr()) + case *exprpb.Expr_ListExpr: + return exprList(factory, id, e.GetListExpr()) + case *exprpb.Expr_SelectExpr: + return exprSelect(factory, id, e.GetSelectExpr()) + case *exprpb.Expr_StructExpr: + s := e.GetStructExpr() + if s.GetMessageName() != "" { + return exprStruct(factory, id, s) + } + return exprMap(factory, id, s) + } + return factory.NewUnspecifiedExpr(id), nil +} + +func exprCall(factory ExprFactory, id int64, call *exprpb.Expr_Call) (Expr, error) { + var err error + args := make([]Expr, len(call.GetArgs())) + for i, a := range call.GetArgs() { + args[i], err = exprInternal(factory, a) + if err != nil { + return nil, err + } + } + if call.GetTarget() == nil { + return factory.NewCall(id, call.GetFunction(), args...), nil + } + + target, err := exprInternal(factory, call.GetTarget()) + if err != nil { + return nil, err + } + return factory.NewMemberCall(id, call.GetFunction(), target, args...), nil +} + +func exprComprehension(factory ExprFactory, id int64, comp *exprpb.Expr_Comprehension) (Expr, error) { + iterRange, err := exprInternal(factory, comp.GetIterRange()) + if err != nil { + return nil, err + } + accuInit, err := exprInternal(factory, comp.GetAccuInit()) + if err != nil { + return nil, err + } + loopCond, err := exprInternal(factory, comp.GetLoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := exprInternal(factory, comp.GetLoopStep()) + if err != nil { + return nil, err + } + result, err := exprInternal(factory, comp.GetResult()) + if err != nil { + return nil, err + } + return factory.NewComprehension(id, + iterRange, + comp.GetIterVar(), + comp.GetAccuVar(), + accuInit, + loopCond, + loopStep, + result), nil +} + +func exprLiteral(factory ExprFactory, id int64, c *exprpb.Constant) (Expr, error) { + val, err := ConstantToVal(c) + if err != nil { + return nil, err + } + return factory.NewLiteral(id, val), nil +} + +func exprIdent(factory ExprFactory, id int64, i *exprpb.Expr_Ident) (Expr, error) { + return factory.NewIdent(id, i.GetName()), nil +} + +func exprList(factory ExprFactory, id int64, l *exprpb.Expr_CreateList) (Expr, error) { + elems := make([]Expr, len(l.GetElements())) + for i, e := range l.GetElements() { + elem, err := exprInternal(factory, e) + if err != nil { + return nil, err + } + elems[i] = elem + } + return factory.NewList(id, elems, l.GetOptionalIndices()), nil +} + +func exprMap(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) { + entries := 
make([]EntryExpr, len(s.GetEntries())) + var err error + for i, entry := range s.GetEntries() { + entries[i], err = exprMapEntry(factory, entry.GetId(), entry) + if err != nil { + return nil, err + } + } + return factory.NewMap(id, entries), nil +} + +func exprMapEntry(factory ExprFactory, id int64, e *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + k, err := exprInternal(factory, e.GetMapKey()) + if err != nil { + return nil, err + } + v, err := exprInternal(factory, e.GetValue()) + if err != nil { + return nil, err + } + return factory.NewMapEntry(id, k, v, e.GetOptionalEntry()), nil +} + +func exprSelect(factory ExprFactory, id int64, s *exprpb.Expr_Select) (Expr, error) { + op, err := exprInternal(factory, s.GetOperand()) + if err != nil { + return nil, err + } + if s.GetTestOnly() { + return factory.NewPresenceTest(id, op, s.GetField()), nil + } + return factory.NewSelect(id, op, s.GetField()), nil +} + +func exprStruct(factory ExprFactory, id int64, s *exprpb.Expr_CreateStruct) (Expr, error) { + fields := make([]EntryExpr, len(s.GetEntries())) + var err error + for i, field := range s.GetEntries() { + fields[i], err = exprStructField(factory, field.GetId(), field) + if err != nil { + return nil, err + } + } + return factory.NewStruct(id, s.GetMessageName(), fields), nil +} + +func exprStructField(factory ExprFactory, id int64, f *exprpb.Expr_CreateStruct_Entry) (EntryExpr, error) { + v, err := exprInternal(factory, f.GetValue()) + if err != nil { + return nil, err + } + return factory.NewStructField(id, f.GetFieldKey(), v, f.GetOptionalEntry()), nil +} + +// ExprToProto serializes an ast.Expr value to a protobuf Expr representation. +func ExprToProto(e Expr) (*exprpb.Expr, error) { + if e == nil { + return &exprpb.Expr{}, nil + } + switch e.Kind() { + case CallKind: + return protoCall(e.ID(), e.AsCall()) + case ComprehensionKind: + return protoComprehension(e.ID(), e.AsComprehension()) + case IdentKind: + return protoIdent(e.ID(), e.AsIdent()) + case ListKind: + return protoList(e.ID(), e.AsList()) + case LiteralKind: + return protoLiteral(e.ID(), e.AsLiteral()) + case MapKind: + return protoMap(e.ID(), e.AsMap()) + case SelectKind: + return protoSelect(e.ID(), e.AsSelect()) + case StructKind: + return protoStruct(e.ID(), e.AsStruct()) + case UnspecifiedExprKind: + // Handle the case where a macro reference may be getting translated. + // A nested macro 'pointer' is a non-zero expression id with no kind set. 
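+		// Emitting an id-only Expr keeps that pointer intact so the corresponding
+		// entry in the macro call map can still be resolved after a round-trip.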
+ if e.ID() != 0 { + return &exprpb.Expr{Id: e.ID()}, nil + } + return &exprpb.Expr{}, nil + } + return nil, fmt.Errorf("unsupported expr kind: %v", e) +} + +// EntryExprToProto converts an ast.EntryExpr to a protobuf CreateStruct entry +func EntryExprToProto(e EntryExpr) (*exprpb.Expr_CreateStruct_Entry, error) { + switch e.Kind() { + case MapEntryKind: + return protoMapEntry(e.ID(), e.AsMapEntry()) + case StructFieldKind: + return protoStructField(e.ID(), e.AsStructField()) + case UnspecifiedEntryExprKind: + return &exprpb.Expr_CreateStruct_Entry{}, nil + } + return nil, fmt.Errorf("unsupported expr entry kind: %v", e) +} + +func protoCall(id int64, call CallExpr) (*exprpb.Expr, error) { + var err error + var target *exprpb.Expr + if call.IsMemberFunction() { + target, err = ExprToProto(call.Target()) + if err != nil { + return nil, err + } + } + callArgs := call.Args() + args := make([]*exprpb.Expr, len(callArgs)) + for i, a := range callArgs { + args[i], err = ExprToProto(a) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_CallExpr{ + CallExpr: &exprpb.Expr_Call{ + Function: call.FunctionName(), + Target: target, + Args: args, + }, + }, + }, nil +} + +func protoComprehension(id int64, comp ComprehensionExpr) (*exprpb.Expr, error) { + iterRange, err := ExprToProto(comp.IterRange()) + if err != nil { + return nil, err + } + accuInit, err := ExprToProto(comp.AccuInit()) + if err != nil { + return nil, err + } + loopCond, err := ExprToProto(comp.LoopCondition()) + if err != nil { + return nil, err + } + loopStep, err := ExprToProto(comp.LoopStep()) + if err != nil { + return nil, err + } + result, err := ExprToProto(comp.Result()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ComprehensionExpr{ + ComprehensionExpr: &exprpb.Expr_Comprehension{ + IterVar: comp.IterVar(), + IterRange: iterRange, + AccuVar: comp.AccuVar(), + AccuInit: accuInit, + LoopCondition: loopCond, + LoopStep: loopStep, + Result: result, + }, + }, + }, nil +} + +func protoIdent(id int64, name string) (*exprpb.Expr, error) { + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_IdentExpr{ + IdentExpr: &exprpb.Expr_Ident{ + Name: name, + }, + }, + }, nil +} + +func protoList(id int64, list ListExpr) (*exprpb.Expr, error) { + var err error + elems := make([]*exprpb.Expr, list.Size()) + for i, e := range list.Elements() { + elems[i], err = ExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ListExpr{ + ListExpr: &exprpb.Expr_CreateList{ + Elements: elems, + OptionalIndices: list.OptionalIndices(), + }, + }, + }, nil +} + +func protoLiteral(id int64, val ref.Val) (*exprpb.Expr, error) { + c, err := ValToConstant(val) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_ConstExpr{ + ConstExpr: c, + }, + }, nil +} + +func protoMap(id int64, m MapExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(m.Entries())) + var err error + for i, e := range m.Entries() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + Entries: entries, + }, + }, + }, nil +} + +func protoMapEntry(id int64, e MapEntry) (*exprpb.Expr_CreateStruct_Entry, error) { + k, err := ExprToProto(e.Key()) + if err != nil { + return nil, err + } + v, err := 
ExprToProto(e.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_MapKey{ + MapKey: k, + }, + Value: v, + OptionalEntry: e.IsOptional(), + }, nil +} + +func protoSelect(id int64, s SelectExpr) (*exprpb.Expr, error) { + op, err := ExprToProto(s.Operand()) + if err != nil { + return nil, err + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_SelectExpr{ + SelectExpr: &exprpb.Expr_Select{ + Operand: op, + Field: s.FieldName(), + TestOnly: s.IsTestOnly(), + }, + }, + }, nil +} + +func protoStruct(id int64, s StructExpr) (*exprpb.Expr, error) { + entries := make([]*exprpb.Expr_CreateStruct_Entry, len(s.Fields())) + var err error + for i, e := range s.Fields() { + entries[i], err = EntryExprToProto(e) + if err != nil { + return nil, err + } + } + return &exprpb.Expr{ + Id: id, + ExprKind: &exprpb.Expr_StructExpr{ + StructExpr: &exprpb.Expr_CreateStruct{ + MessageName: s.TypeName(), + Entries: entries, + }, + }, + }, nil +} + +func protoStructField(id int64, f StructField) (*exprpb.Expr_CreateStruct_Entry, error) { + v, err := ExprToProto(f.Value()) + if err != nil { + return nil, err + } + return &exprpb.Expr_CreateStruct_Entry{ + Id: id, + KeyKind: &exprpb.Expr_CreateStruct_Entry_FieldKey{ + FieldKey: f.Name(), + }, + Value: v, + OptionalEntry: f.IsOptional(), + }, nil +} + +// SourceInfoToProto serializes an ast.SourceInfo value to a protobuf SourceInfo object. +func SourceInfoToProto(info *SourceInfo) (*exprpb.SourceInfo, error) { + if info == nil { + return &exprpb.SourceInfo{}, nil + } + sourceInfo := &exprpb.SourceInfo{ + SyntaxVersion: info.SyntaxVersion(), + Location: info.Description(), + LineOffsets: info.LineOffsets(), + Positions: make(map[int64]int32, len(info.OffsetRanges())), + MacroCalls: make(map[int64]*exprpb.Expr, len(info.MacroCalls())), + } + for id, offset := range info.OffsetRanges() { + sourceInfo.Positions[id] = offset.Start + } + for id, e := range info.MacroCalls() { + call, err := ExprToProto(e) + if err != nil { + return nil, err + } + sourceInfo.MacroCalls[id] = call + } + return sourceInfo, nil +} + +// ProtoToSourceInfo deserializes the protobuf into a native SourceInfo value. +func ProtoToSourceInfo(info *exprpb.SourceInfo) (*SourceInfo, error) { + sourceInfo := &SourceInfo{ + syntax: info.GetSyntaxVersion(), + desc: info.GetLocation(), + lines: info.GetLineOffsets(), + offsetRanges: make(map[int64]OffsetRange, len(info.GetPositions())), + macroCalls: make(map[int64]Expr, len(info.GetMacroCalls())), + } + for id, offset := range info.GetPositions() { + sourceInfo.SetOffsetRange(id, OffsetRange{Start: offset, Stop: offset}) + } + for id, e := range info.GetMacroCalls() { + call, err := ProtoToExpr(e) + if err != nil { + return nil, err + } + sourceInfo.SetMacroCall(id, call) + } + return sourceInfo, nil +} + +// ReferenceInfoToProto converts a ReferenceInfo instance to a protobuf Reference suitable for serialization. +func ReferenceInfoToProto(info *ReferenceInfo) (*exprpb.Reference, error) { + c, err := ValToConstant(info.Value) + if err != nil { + return nil, err + } + return &exprpb.Reference{ + Name: info.Name, + OverloadId: info.OverloadIDs, + Value: c, + }, nil +} + +// ProtoToReferenceInfo converts a protobuf Reference into a CEL-native ReferenceInfo instance. 
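+//
+// A Reference without a constant value yields a ReferenceInfo with a nil Value.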
+func ProtoToReferenceInfo(ref *exprpb.Reference) (*ReferenceInfo, error) { + v, err := ConstantToVal(ref.GetValue()) + if err != nil { + return nil, err + } + return &ReferenceInfo{ + Name: ref.GetName(), + OverloadIDs: ref.GetOverloadId(), + Value: v, + }, nil +} + +// ValToConstant converts a CEL-native ref.Val to a protobuf Constant. +// +// Only simple scalar types are supported by this method. +func ValToConstant(v ref.Val) (*exprpb.Constant, error) { + if v == nil { + return nil, nil + } + switch v.Type() { + case types.BoolType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BoolValue{BoolValue: v.Value().(bool)}}, nil + case types.BytesType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_BytesValue{BytesValue: v.Value().([]byte)}}, nil + case types.DoubleType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_DoubleValue{DoubleValue: v.Value().(float64)}}, nil + case types.IntType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: v.Value().(int64)}}, nil + case types.NullType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_NullValue{NullValue: structpb.NullValue_NULL_VALUE}}, nil + case types.StringType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_StringValue{StringValue: v.Value().(string)}}, nil + case types.UintType: + return &exprpb.Constant{ConstantKind: &exprpb.Constant_Uint64Value{Uint64Value: v.Value().(uint64)}}, nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", v.Type()) +} + +// ConstantToVal converts a protobuf Constant to a CEL-native ref.Val. +func ConstantToVal(c *exprpb.Constant) (ref.Val, error) { + if c == nil { + return nil, nil + } + switch c.GetConstantKind().(type) { + case *exprpb.Constant_BoolValue: + return types.Bool(c.GetBoolValue()), nil + case *exprpb.Constant_BytesValue: + return types.Bytes(c.GetBytesValue()), nil + case *exprpb.Constant_DoubleValue: + return types.Double(c.GetDoubleValue()), nil + case *exprpb.Constant_Int64Value: + return types.Int(c.GetInt64Value()), nil + case *exprpb.Constant_NullValue: + return types.NullValue, nil + case *exprpb.Constant_StringValue: + return types.String(c.GetStringValue()), nil + case *exprpb.Constant_Uint64Value: + return types.Uint(c.GetUint64Value()), nil + } + return nil, fmt.Errorf("unsupported constant kind: %v", c.GetConstantKind()) +} diff --git a/vendor/github.com/google/cel-go/common/ast/expr.go b/vendor/github.com/google/cel-go/common/ast/expr.go new file mode 100644 index 00000000..7b043a15 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/expr.go @@ -0,0 +1,860 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// ExprKind represents the expression node kind. +type ExprKind int + +const ( + // UnspecifiedExprKind represents an unset expression with no specified properties. + UnspecifiedExprKind ExprKind = iota + + // CallKind represents a function call. 
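+	// Examples include global calls such as `size(x)` and member calls such as
+	// `msg.startsWith('prefix')`.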
+	CallKind
+
+	// ComprehensionKind represents a comprehension expression generated by a macro.
+	ComprehensionKind
+
+	// IdentKind represents a simple variable, constant, or type identifier.
+	IdentKind
+
+	// ListKind represents a list literal expression.
+	ListKind
+
+	// LiteralKind represents a primitive scalar literal.
+	LiteralKind
+
+	// MapKind represents a map literal expression.
+	MapKind
+
+	// SelectKind represents a field selection expression.
+	SelectKind
+
+	// StructKind represents a struct literal expression.
+	StructKind
+)
+
+// Expr represents the base expression node in a CEL abstract syntax tree.
+//
+// Depending on the `Kind()` value, the Expr may be converted to a concrete expression type
+// as indicated by the `As` methods.
+type Expr interface {
+	// ID of the expression as it appears in the AST.
+	ID() int64
+
+	// Kind of the expression node. See ExprKind for the valid enum values.
+	Kind() ExprKind
+
+	// AsCall adapts the expr into a CallExpr.
+	//
+	// The Kind() must be equal to a CallKind for the conversion to be well-defined.
+	AsCall() CallExpr
+
+	// AsComprehension adapts the expr into a ComprehensionExpr.
+	//
+	// The Kind() must be equal to a ComprehensionKind for the conversion to be well-defined.
+	AsComprehension() ComprehensionExpr
+
+	// AsIdent adapts the expr into an identifier string.
+	//
+	// The Kind() must be equal to an IdentKind for the conversion to be well-defined.
+	AsIdent() string
+
+	// AsLiteral adapts the expr into a constant ref.Val.
+	//
+	// The Kind() must be equal to a LiteralKind for the conversion to be well-defined.
+	AsLiteral() ref.Val
+
+	// AsList adapts the expr into a ListExpr.
+	//
+	// The Kind() must be equal to a ListKind for the conversion to be well-defined.
+	AsList() ListExpr
+
+	// AsMap adapts the expr into a MapExpr.
+	//
+	// The Kind() must be equal to a MapKind for the conversion to be well-defined.
+	AsMap() MapExpr
+
+	// AsSelect adapts the expr into a SelectExpr.
+	//
+	// The Kind() must be equal to a SelectKind for the conversion to be well-defined.
+	AsSelect() SelectExpr
+
+	// AsStruct adapts the expr into a StructExpr.
+	//
+	// The Kind() must be equal to a StructKind for the conversion to be well-defined.
+	AsStruct() StructExpr
+
+	// RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+	RenumberIDs(IDGenerator)
+
+	// SetKindCase replaces the contents of the current expression with the contents of the other.
+	//
+	// SetKindCase takes ownership of any expression instances referenced within the input Expr.
+	// A shallow copy is made of the Expr value itself, but not a deep one.
+	//
+	// This method should only be used during AST rewrites using temporary Expr values.
+	SetKindCase(Expr)
+
+	// isExpr is a marker interface.
+	isExpr()
+}
+
+// EntryExprKind represents the possible EntryExpr kinds.
+type EntryExprKind int
+
+const (
+	// UnspecifiedEntryExprKind indicates that the entry expr is not set.
+	UnspecifiedEntryExprKind EntryExprKind = iota
+
+	// MapEntryKind indicates that the entry is a MapEntry type with key and value expressions.
+	MapEntryKind
+
+	// StructFieldKind indicates that the entry is a StructField with a field name and initializer
+	// expression.
+	StructFieldKind
+)
+
+// EntryExpr represents the base entry expression in a CEL map or struct literal.
+type EntryExpr interface {
+	// ID of the entry as it appears in the AST.
+	ID() int64
+
+	// Kind of the entry expression node. See EntryExprKind for valid enum values.
+	Kind() EntryExprKind
+
+	// AsMapEntry casts the EntryExpr to a MapEntry.
+	//
+	// The Kind() must be equal to MapEntryKind for the conversion to be well-defined.
+	AsMapEntry() MapEntry
+
+	// AsStructField casts the EntryExpr to a StructField.
+	//
+	// The Kind() must be equal to StructFieldKind for the conversion to be well-defined.
+	AsStructField() StructField
+
+	// RenumberIDs performs an in-place update of the expression and all of its descendants' numeric ids.
+	RenumberIDs(IDGenerator)
+
+	isEntryExpr()
+}
+
+// IDGenerator produces unique ids suitable for tagging expression nodes.
+type IDGenerator func(originalID int64) int64
+
+// CallExpr defines an interface for inspecting a function call and its arguments.
+type CallExpr interface {
+	// FunctionName returns the name of the function.
+	FunctionName() string
+
+	// IsMemberFunction returns whether the call has a non-nil target indicating it is a member function.
+	IsMemberFunction() bool
+
+	// Target returns the target of the expression if one is present.
+	Target() Expr
+
+	// Args returns the list of call arguments, excluding the target.
+	Args() []Expr
+
+	// marker interface method
+	isExpr()
+}
+
+// ListExpr defines an interface for inspecting a list literal expression.
+type ListExpr interface {
+	// Elements returns the list elements as navigable expressions.
+	Elements() []Expr
+
+	// OptionalIndices returns the list of optional indices in the list literal.
+	OptionalIndices() []int32
+
+	// IsOptional indicates whether the given element index is optional.
+	IsOptional(int32) bool
+
+	// Size returns the number of elements in the list.
+	Size() int
+
+	// marker interface method
+	isExpr()
+}
+
+// SelectExpr defines an interface for inspecting a select expression.
+type SelectExpr interface {
+	// Operand returns the selection operand expression.
+	Operand() Expr
+
+	// FieldName returns the field name being selected from the operand.
+	FieldName() string
+
+	// IsTestOnly indicates whether the select expression is a presence test generated by a macro.
+	IsTestOnly() bool
+
+	// marker interface method
+	isExpr()
+}
+
+// MapExpr defines an interface for inspecting a map expression.
+type MapExpr interface {
+	// Entries returns the map key-value pairs as EntryExpr values.
+	Entries() []EntryExpr
+
+	// Size returns the number of entries in the map.
+	Size() int
+
+	// marker interface method
+	isExpr()
+}
+
+// MapEntry defines an interface for inspecting a map entry.
+type MapEntry interface {
+	// Key returns the map entry key expression.
+	Key() Expr
+
+	// Value returns the map entry value expression.
+	Value() Expr
+
+	// IsOptional returns whether the entry is optional.
+	IsOptional() bool
+
+	// marker interface method
+	isEntryExpr()
+}
+
+// StructExpr defines an interface for inspecting a struct and its field initializers.
+type StructExpr interface {
+	// TypeName returns the struct type name.
+	TypeName() string
+
+	// Fields returns the set of field initializers in the struct expression as EntryExpr values.
+	Fields() []EntryExpr
+
+	// marker interface method
+	isExpr()
+}
+
+// StructField defines an interface for inspecting a struct field initialization.
+type StructField interface {
+	// Name returns the name of the field.
+	Name() string
+
+	// Value returns the field initialization expression.
+	Value() Expr
+
+	// IsOptional returns whether the field is optional.
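+	// Optional fields are produced by the `?field: value` initializer syntax when
+	// optional types are enabled.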
+ IsOptional() bool + + // marker interface method + isEntryExpr() +} + +// ComprehensionExpr defines an interface for inspecting a comprehension expression. +type ComprehensionExpr interface { + // IterRange returns the iteration range expression. + IterRange() Expr + + // IterVar returns the iteration variable name. + IterVar() string + + // AccuVar returns the accumulation variable name. + AccuVar() string + + // AccuInit returns the accumulation variable initialization expression. + AccuInit() Expr + + // LoopCondition returns the loop condition expression. + LoopCondition() Expr + + // LoopStep returns the loop step expression. + LoopStep() Expr + + // Result returns the comprehension result expression. + Result() Expr + + // marker interface method + isExpr() +} + +var _ Expr = &expr{} + +type expr struct { + id int64 + exprKindCase +} + +type exprKindCase interface { + Kind() ExprKind + + renumberIDs(IDGenerator) + + isExpr() +} + +func (e *expr) ID() int64 { + if e == nil { + return 0 + } + return e.id +} + +func (e *expr) Kind() ExprKind { + if e == nil || e.exprKindCase == nil { + return UnspecifiedExprKind + } + return e.exprKindCase.Kind() +} + +func (e *expr) AsCall() CallExpr { + if e.Kind() != CallKind { + return nilCall + } + return e.exprKindCase.(CallExpr) +} + +func (e *expr) AsComprehension() ComprehensionExpr { + if e.Kind() != ComprehensionKind { + return nilCompre + } + return e.exprKindCase.(ComprehensionExpr) +} + +func (e *expr) AsIdent() string { + if e.Kind() != IdentKind { + return "" + } + return string(e.exprKindCase.(baseIdentExpr)) +} + +func (e *expr) AsLiteral() ref.Val { + if e.Kind() != LiteralKind { + return nil + } + return e.exprKindCase.(*baseLiteral).Val +} + +func (e *expr) AsList() ListExpr { + if e.Kind() != ListKind { + return nilList + } + return e.exprKindCase.(ListExpr) +} + +func (e *expr) AsMap() MapExpr { + if e.Kind() != MapKind { + return nilMap + } + return e.exprKindCase.(MapExpr) +} + +func (e *expr) AsSelect() SelectExpr { + if e.Kind() != SelectKind { + return nilSel + } + return e.exprKindCase.(SelectExpr) +} + +func (e *expr) AsStruct() StructExpr { + if e.Kind() != StructKind { + return nilStruct + } + return e.exprKindCase.(StructExpr) +} + +func (e *expr) SetKindCase(other Expr) { + if e == nil { + return + } + if other == nil { + e.exprKindCase = nil + return + } + switch other.Kind() { + case CallKind: + c := other.AsCall() + e.exprKindCase = &baseCallExpr{ + function: c.FunctionName(), + target: c.Target(), + args: c.Args(), + isMember: c.IsMemberFunction(), + } + case ComprehensionKind: + c := other.AsComprehension() + e.exprKindCase = &baseComprehensionExpr{ + iterRange: c.IterRange(), + iterVar: c.IterVar(), + accuVar: c.AccuVar(), + accuInit: c.AccuInit(), + loopCond: c.LoopCondition(), + loopStep: c.LoopStep(), + result: c.Result(), + } + case IdentKind: + e.exprKindCase = baseIdentExpr(other.AsIdent()) + case ListKind: + l := other.AsList() + optIndexMap := make(map[int32]struct{}, len(l.OptionalIndices())) + for _, idx := range l.OptionalIndices() { + optIndexMap[idx] = struct{}{} + } + e.exprKindCase = &baseListExpr{ + elements: l.Elements(), + optIndices: l.OptionalIndices(), + optIndexMap: optIndexMap, + } + case LiteralKind: + e.exprKindCase = &baseLiteral{Val: other.AsLiteral()} + case MapKind: + e.exprKindCase = &baseMapExpr{ + entries: other.AsMap().Entries(), + } + case SelectKind: + s := other.AsSelect() + e.exprKindCase = &baseSelectExpr{ + operand: s.Operand(), + field: s.FieldName(), + testOnly: 
s.IsTestOnly(), + } + case StructKind: + s := other.AsStruct() + e.exprKindCase = &baseStructExpr{ + typeName: s.TypeName(), + fields: s.Fields(), + } + case UnspecifiedExprKind: + e.exprKindCase = nil + } +} + +func (e *expr) RenumberIDs(idGen IDGenerator) { + if e == nil { + return + } + e.id = idGen(e.id) + if e.exprKindCase != nil { + e.exprKindCase.renumberIDs(idGen) + } +} + +type baseCallExpr struct { + function string + target Expr + args []Expr + isMember bool +} + +func (*baseCallExpr) Kind() ExprKind { + return CallKind +} + +func (e *baseCallExpr) FunctionName() string { + if e == nil { + return "" + } + return e.function +} + +func (e *baseCallExpr) IsMemberFunction() bool { + if e == nil { + return false + } + return e.isMember +} + +func (e *baseCallExpr) Target() Expr { + if e == nil || !e.IsMemberFunction() { + return nilExpr + } + return e.target +} + +func (e *baseCallExpr) Args() []Expr { + if e == nil { + return []Expr{} + } + return e.args +} + +func (e *baseCallExpr) renumberIDs(idGen IDGenerator) { + if e.IsMemberFunction() { + e.Target().RenumberIDs(idGen) + } + for _, arg := range e.Args() { + arg.RenumberIDs(idGen) + } +} + +func (*baseCallExpr) isExpr() {} + +var _ ComprehensionExpr = &baseComprehensionExpr{} + +type baseComprehensionExpr struct { + iterRange Expr + iterVar string + accuVar string + accuInit Expr + loopCond Expr + loopStep Expr + result Expr +} + +func (*baseComprehensionExpr) Kind() ExprKind { + return ComprehensionKind +} + +func (e *baseComprehensionExpr) IterRange() Expr { + if e == nil { + return nilExpr + } + return e.iterRange +} + +func (e *baseComprehensionExpr) IterVar() string { + return e.iterVar +} + +func (e *baseComprehensionExpr) AccuVar() string { + return e.accuVar +} + +func (e *baseComprehensionExpr) AccuInit() Expr { + if e == nil { + return nilExpr + } + return e.accuInit +} + +func (e *baseComprehensionExpr) LoopCondition() Expr { + if e == nil { + return nilExpr + } + return e.loopCond +} + +func (e *baseComprehensionExpr) LoopStep() Expr { + if e == nil { + return nilExpr + } + return e.loopStep +} + +func (e *baseComprehensionExpr) Result() Expr { + if e == nil { + return nilExpr + } + return e.result +} + +func (e *baseComprehensionExpr) renumberIDs(idGen IDGenerator) { + e.IterRange().RenumberIDs(idGen) + e.AccuInit().RenumberIDs(idGen) + e.LoopCondition().RenumberIDs(idGen) + e.LoopStep().RenumberIDs(idGen) + e.Result().RenumberIDs(idGen) +} + +func (*baseComprehensionExpr) isExpr() {} + +var _ exprKindCase = baseIdentExpr("") + +type baseIdentExpr string + +func (baseIdentExpr) Kind() ExprKind { + return IdentKind +} + +func (e baseIdentExpr) renumberIDs(IDGenerator) {} + +func (baseIdentExpr) isExpr() {} + +var _ exprKindCase = &baseLiteral{} +var _ ref.Val = &baseLiteral{} + +type baseLiteral struct { + ref.Val +} + +func (*baseLiteral) Kind() ExprKind { + return LiteralKind +} + +func (l *baseLiteral) renumberIDs(IDGenerator) {} + +func (*baseLiteral) isExpr() {} + +var _ ListExpr = &baseListExpr{} + +type baseListExpr struct { + elements []Expr + optIndices []int32 + optIndexMap map[int32]struct{} +} + +func (*baseListExpr) Kind() ExprKind { + return ListKind +} + +func (e *baseListExpr) Elements() []Expr { + if e == nil { + return []Expr{} + } + return e.elements +} + +func (e *baseListExpr) IsOptional(index int32) bool { + _, found := e.optIndexMap[index] + return found +} + +func (e *baseListExpr) OptionalIndices() []int32 { + if e == nil { + return []int32{} + } + return e.optIndices +} + +func (e 
*baseListExpr) Size() int { + return len(e.Elements()) +} + +func (e *baseListExpr) renumberIDs(idGen IDGenerator) { + for _, elem := range e.Elements() { + elem.RenumberIDs(idGen) + } +} + +func (*baseListExpr) isExpr() {} + +type baseMapExpr struct { + entries []EntryExpr +} + +func (*baseMapExpr) Kind() ExprKind { + return MapKind +} + +func (e *baseMapExpr) Entries() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.entries +} + +func (e *baseMapExpr) Size() int { + return len(e.Entries()) +} + +func (e *baseMapExpr) renumberIDs(idGen IDGenerator) { + for _, entry := range e.Entries() { + entry.RenumberIDs(idGen) + } +} + +func (*baseMapExpr) isExpr() {} + +type baseSelectExpr struct { + operand Expr + field string + testOnly bool +} + +func (*baseSelectExpr) Kind() ExprKind { + return SelectKind +} + +func (e *baseSelectExpr) Operand() Expr { + if e == nil || e.operand == nil { + return nilExpr + } + return e.operand +} + +func (e *baseSelectExpr) FieldName() string { + if e == nil { + return "" + } + return e.field +} + +func (e *baseSelectExpr) IsTestOnly() bool { + if e == nil { + return false + } + return e.testOnly +} + +func (e *baseSelectExpr) renumberIDs(idGen IDGenerator) { + e.Operand().RenumberIDs(idGen) +} + +func (*baseSelectExpr) isExpr() {} + +type baseStructExpr struct { + typeName string + fields []EntryExpr +} + +func (*baseStructExpr) Kind() ExprKind { + return StructKind +} + +func (e *baseStructExpr) TypeName() string { + if e == nil { + return "" + } + return e.typeName +} + +func (e *baseStructExpr) Fields() []EntryExpr { + if e == nil { + return []EntryExpr{} + } + return e.fields +} + +func (e *baseStructExpr) renumberIDs(idGen IDGenerator) { + for _, f := range e.Fields() { + f.RenumberIDs(idGen) + } +} + +func (*baseStructExpr) isExpr() {} + +type entryExprKindCase interface { + Kind() EntryExprKind + + renumberIDs(IDGenerator) + + isEntryExpr() +} + +var _ EntryExpr = &entryExpr{} + +type entryExpr struct { + id int64 + entryExprKindCase +} + +func (e *entryExpr) ID() int64 { + return e.id +} + +func (e *entryExpr) AsMapEntry() MapEntry { + if e.Kind() != MapEntryKind { + return nilMapEntry + } + return e.entryExprKindCase.(MapEntry) +} + +func (e *entryExpr) AsStructField() StructField { + if e.Kind() != StructFieldKind { + return nilStructField + } + return e.entryExprKindCase.(StructField) +} + +func (e *entryExpr) RenumberIDs(idGen IDGenerator) { + e.id = idGen(e.id) + e.entryExprKindCase.renumberIDs(idGen) +} + +type baseMapEntry struct { + key Expr + value Expr + isOptional bool +} + +func (e *baseMapEntry) Kind() EntryExprKind { + return MapEntryKind +} + +func (e *baseMapEntry) Key() Expr { + if e == nil { + return nilExpr + } + return e.key +} + +func (e *baseMapEntry) Value() Expr { + if e == nil { + return nilExpr + } + return e.value +} + +func (e *baseMapEntry) IsOptional() bool { + if e == nil { + return false + } + return e.isOptional +} + +func (e *baseMapEntry) renumberIDs(idGen IDGenerator) { + e.Key().RenumberIDs(idGen) + e.Value().RenumberIDs(idGen) +} + +func (*baseMapEntry) isEntryExpr() {} + +type baseStructField struct { + field string + value Expr + isOptional bool +} + +func (f *baseStructField) Kind() EntryExprKind { + return StructFieldKind +} + +func (f *baseStructField) Name() string { + if f == nil { + return "" + } + return f.field +} + +func (f *baseStructField) Value() Expr { + if f == nil { + return nilExpr + } + return f.value +} + +func (f *baseStructField) IsOptional() bool { + if f == nil { + return 
false + } + return f.isOptional +} + +func (f *baseStructField) renumberIDs(idGen IDGenerator) { + f.Value().RenumberIDs(idGen) +} + +func (*baseStructField) isEntryExpr() {} + +var ( + nilExpr *expr = nil + nilCall *baseCallExpr = nil + nilCompre *baseComprehensionExpr = nil + nilList *baseListExpr = nil + nilMap *baseMapExpr = nil + nilMapEntry *baseMapEntry = nil + nilSel *baseSelectExpr = nil + nilStruct *baseStructExpr = nil + nilStructField *baseStructField = nil +) diff --git a/vendor/github.com/google/cel-go/common/ast/factory.go b/vendor/github.com/google/cel-go/common/ast/factory.go new file mode 100644 index 00000000..b7f36e72 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/factory.go @@ -0,0 +1,303 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import "github.com/google/cel-go/common/types/ref" + +// ExprFactory interface defines a set of methods necessary for building native expression values. +type ExprFactory interface { + // CopyExpr creates a deep copy of the input Expr value. + CopyExpr(Expr) Expr + + // CopyEntryExpr creates a deep copy of the input EntryExpr value. + CopyEntryExpr(EntryExpr) EntryExpr + + // NewCall creates an Expr value representing a global function call. + NewCall(id int64, function string, args ...Expr) Expr + + // NewComprehension creates an Expr value representing a comprehension over a value range. + NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCondition, loopStep, result Expr) Expr + + // NewMemberCall creates an Expr value representing a member function call. + NewMemberCall(id int64, function string, receiver Expr, args ...Expr) Expr + + // NewIdent creates an Expr value representing an identifier. + NewIdent(id int64, name string) Expr + + // NewAccuIdent creates an Expr value representing an accumulator identifier within a + // comprehension. + NewAccuIdent(id int64) Expr + + // NewLiteral creates an Expr value representing a literal value, such as a string or integer. + NewLiteral(id int64, value ref.Val) Expr + + // NewList creates an Expr value representing a list literal expression with optional indices. + // + // Optional indices will typically be empty unless the CEL optional types are enabled. + NewList(id int64, elems []Expr, optIndices []int32) Expr + + // NewMap creates an Expr value representing a map literal expression. + NewMap(id int64, entries []EntryExpr) Expr + + // NewMapEntry creates a MapEntry with a given key, value, and a flag indicating whether + // the key is optionally set. + NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr + + // NewPresenceTest creates an Expr representing a field presence test on an operand expression. + NewPresenceTest(id int64, operand Expr, field string) Expr + + // NewSelect creates an Expr representing a field selection on an operand expression.
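// Editorial sketch, not part of the vendored file: one way client code might
// use the ExprFactory above to build `x == 1`. The package name, helper name,
// and hard-coded IDs are illustrative assumptions; in practice the parser
// assigns expression IDs.
package main

import (
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/types"
)

func buildEquality() ast.Expr {
	fac := ast.NewExprFactory()
	// "_==_" is the operator name CEL uses for equality calls.
	return fac.NewCall(1, "_==_",
		fac.NewIdent(2, "x"),
		fac.NewLiteral(3, types.Int(1)),
	)
}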
+ NewSelect(id int64, operand Expr, field string) Expr + + // NewStruct creates an Expr value representing a struct literal with a given type name and a + // set of field initializers. + NewStruct(id int64, typeName string, fields []EntryExpr) Expr + + // NewStructField creates a StructField with a given field name, value, and a flag indicating + // whether the field is optionally set. + NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr + + // NewUnspecifiedExpr creates an empty expression node. + NewUnspecifiedExpr(id int64) Expr + + isExprFactory() +} + +type baseExprFactory struct{} + +// NewExprFactory creates an ExprFactory instance. +func NewExprFactory() ExprFactory { + return &baseExprFactory{} +} + +func (fac *baseExprFactory) NewCall(id int64, function string, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: nilExpr, + args: args, + isMember: false, + }) +} + +func (fac *baseExprFactory) NewMemberCall(id int64, function string, target Expr, args ...Expr) Expr { + if len(args) == 0 { + args = []Expr{} + } + return fac.newExpr( + id, + &baseCallExpr{ + function: function, + target: target, + args: args, + isMember: true, + }) +} + +func (fac *baseExprFactory) NewComprehension(id int64, iterRange Expr, iterVar, accuVar string, accuInit, loopCond, loopStep, result Expr) Expr { + return fac.newExpr( + id, + &baseComprehensionExpr{ + iterRange: iterRange, + iterVar: iterVar, + accuVar: accuVar, + accuInit: accuInit, + loopCond: loopCond, + loopStep: loopStep, + result: result, + }) +} + +func (fac *baseExprFactory) NewIdent(id int64, name string) Expr { + return fac.newExpr(id, baseIdentExpr(name)) +} + +func (fac *baseExprFactory) NewAccuIdent(id int64) Expr { + return fac.NewIdent(id, "__result__") +} + +func (fac *baseExprFactory) NewLiteral(id int64, value ref.Val) Expr { + return fac.newExpr(id, &baseLiteral{Val: value}) +} + +func (fac *baseExprFactory) NewList(id int64, elems []Expr, optIndices []int32) Expr { + optIndexMap := make(map[int32]struct{}, len(optIndices)) + for _, idx := range optIndices { + optIndexMap[idx] = struct{}{} + } + return fac.newExpr(id, + &baseListExpr{ + elements: elems, + optIndices: optIndices, + optIndexMap: optIndexMap, + }) +} + +func (fac *baseExprFactory) NewMap(id int64, entries []EntryExpr) Expr { + return fac.newExpr(id, &baseMapExpr{entries: entries}) +} + +func (fac *baseExprFactory) NewMapEntry(id int64, key, value Expr, isOptional bool) EntryExpr { + return fac.newEntryExpr( + id, + &baseMapEntry{ + key: key, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewPresenceTest(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + field: field, + testOnly: true, + }) +} + +func (fac *baseExprFactory) NewSelect(id int64, operand Expr, field string) Expr { + return fac.newExpr( + id, + &baseSelectExpr{ + operand: operand, + field: field, + }) +} + +func (fac *baseExprFactory) NewStruct(id int64, typeName string, fields []EntryExpr) Expr { + return fac.newExpr( + id, + &baseStructExpr{ + typeName: typeName, + fields: fields, + }) +} + +func (fac *baseExprFactory) NewStructField(id int64, field string, value Expr, isOptional bool) EntryExpr { + return fac.newEntryExpr( + id, + &baseStructField{ + field: field, + value: value, + isOptional: isOptional, + }) +} + +func (fac *baseExprFactory) NewUnspecifiedExpr(id int64) Expr { + return 
fac.newExpr(id, nil) +} + +func (fac *baseExprFactory) CopyExpr(e Expr) Expr { + // unwrap navigable expressions to avoid unnecessary allocations during copying. + if nav, ok := e.(*navigableExprImpl); ok { + e = nav.Expr + } + switch e.Kind() { + case CallKind: + c := e.AsCall() + argsCopy := make([]Expr, len(c.Args())) + for i, arg := range c.Args() { + argsCopy[i] = fac.CopyExpr(arg) + } + if !c.IsMemberFunction() { + return fac.NewCall(e.ID(), c.FunctionName(), argsCopy...) + } + return fac.NewMemberCall(e.ID(), c.FunctionName(), fac.CopyExpr(c.Target()), argsCopy...) + case ComprehensionKind: + compre := e.AsComprehension() + return fac.NewComprehension(e.ID(), + fac.CopyExpr(compre.IterRange()), + compre.IterVar(), + compre.AccuVar(), + fac.CopyExpr(compre.AccuInit()), + fac.CopyExpr(compre.LoopCondition()), + fac.CopyExpr(compre.LoopStep()), + fac.CopyExpr(compre.Result())) + case IdentKind: + return fac.NewIdent(e.ID(), e.AsIdent()) + case ListKind: + l := e.AsList() + elemsCopy := make([]Expr, l.Size()) + for i, elem := range l.Elements() { + elemsCopy[i] = fac.CopyExpr(elem) + } + return fac.NewList(e.ID(), elemsCopy, l.OptionalIndices()) + case LiteralKind: + return fac.NewLiteral(e.ID(), e.AsLiteral()) + case MapKind: + m := e.AsMap() + entriesCopy := make([]EntryExpr, m.Size()) + for i, entry := range m.Entries() { + entriesCopy[i] = fac.CopyEntryExpr(entry) + } + return fac.NewMap(e.ID(), entriesCopy) + case SelectKind: + s := e.AsSelect() + if s.IsTestOnly() { + return fac.NewPresenceTest(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + } + return fac.NewSelect(e.ID(), fac.CopyExpr(s.Operand()), s.FieldName()) + case StructKind: + s := e.AsStruct() + fieldsCopy := make([]EntryExpr, len(s.Fields())) + for i, field := range s.Fields() { + fieldsCopy[i] = fac.CopyEntryExpr(field) + } + return fac.NewStruct(e.ID(), s.TypeName(), fieldsCopy) + default: + return fac.NewUnspecifiedExpr(e.ID()) + } +} + +func (fac *baseExprFactory) CopyEntryExpr(e EntryExpr) EntryExpr { + switch e.Kind() { + case MapEntryKind: + entry := e.AsMapEntry() + return fac.NewMapEntry(e.ID(), + fac.CopyExpr(entry.Key()), fac.CopyExpr(entry.Value()), entry.IsOptional()) + case StructFieldKind: + field := e.AsStructField() + return fac.NewStructField(e.ID(), + field.Name(), fac.CopyExpr(field.Value()), field.IsOptional()) + default: + return fac.newEntryExpr(e.ID(), nil) + } +} + +func (*baseExprFactory) isExprFactory() {} + +func (fac *baseExprFactory) newExpr(id int64, e exprKindCase) Expr { + return &expr{ + id: id, + exprKindCase: e, + } +} + +func (fac *baseExprFactory) newEntryExpr(id int64, e entryExprKindCase) EntryExpr { + return &entryExpr{ + id: id, + entryExprKindCase: e, + } +} + +var ( + defaultFactory = &baseExprFactory{} +) diff --git a/vendor/github.com/google/cel-go/common/ast/navigable.go b/vendor/github.com/google/cel-go/common/ast/navigable.go new file mode 100644 index 00000000..f5ddf6aa --- /dev/null +++ b/vendor/github.com/google/cel-go/common/ast/navigable.go @@ -0,0 +1,652 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ast + +import ( + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// NavigableExpr represents the base navigable expression value with methods to inspect the +// parent and child expressions. +type NavigableExpr interface { + Expr + + // Type of the expression. + // + // If the expression is type-checked, the type check metadata is returned. If the expression + // has not been type-checked, the types.DynType value is returned. + Type() *types.Type + + // Parent returns the parent expression node, if one exists. + Parent() (NavigableExpr, bool) + + // Children returns a list of child expression nodes. + Children() []NavigableExpr + + // Depth indicates the depth in the expression tree. + // + // The root expression has depth 0. + Depth() int +} + +// NavigateAST converts an AST to a NavigableExpr. +func NavigateAST(ast *AST) NavigableExpr { + return NavigateExpr(ast, ast.Expr()) +} + +// NavigateExpr creates a NavigableExpr whose type information is backed by the input AST. +// +// If the expression is already a NavigableExpr, the parent and depth information will be +// propagated on the new NavigableExpr value; otherwise, the expr value will be treated +// as though it is the root of the expression graph with a depth of 0. +func NavigateExpr(ast *AST, expr Expr) NavigableExpr { + depth := 0 + var parent NavigableExpr = nil + if nav, ok := expr.(NavigableExpr); ok { + depth = nav.Depth() + parent, _ = nav.Parent() + } + return newNavigableExpr(ast, parent, expr, depth) +} + +// ExprMatcher takes a NavigableExpr as input and indicates whether the value is a match. +// +// This function type should be used with the `Match` and `MatchList` calls. +type ExprMatcher func(NavigableExpr) bool + +// ConstantValueMatcher returns an ExprMatcher which will return true if the input NavigableExpr +// is comprised of all constant values, such as a simple literal or even list and map literal. +func ConstantValueMatcher() ExprMatcher { + return matchIsConstantValue +} + +// KindMatcher returns an ExprMatcher which will return true if the input NavigableExpr.Kind() matches +// the specified `kind`. +func KindMatcher(kind ExprKind) ExprMatcher { + return func(e NavigableExpr) bool { + return e.Kind() == kind + } +} + +// FunctionMatcher returns an ExprMatcher which will match NavigableExpr nodes of CallKind type whose +// function name is equal to `funcName`. +func FunctionMatcher(funcName string) ExprMatcher { + return func(e NavigableExpr) bool { + if e.Kind() != CallKind { + return false + } + return e.AsCall().FunctionName() == funcName + } +} + +// AllMatcher returns true for all descendants of a NavigableExpr, effectively flattening them into a list. +// +// Such a result would work well with subsequent MatchList calls. +func AllMatcher() ExprMatcher { + return func(NavigableExpr) bool { + return true + } +} + +// MatchDescendants takes a NavigableExpr and ExprMatcher and produces a list of NavigableExpr values +// matching the input criteria in post-order (bottom-up).
+func MatchDescendants(expr NavigableExpr, matcher ExprMatcher) []NavigableExpr { + matches := []NavigableExpr{} + navVisitor := &baseVisitor{ + visitExpr: func(e Expr) { + nav := e.(NavigableExpr) + if matcher(nav) { + matches = append(matches, nav) + } + }, + } + visit(expr, navVisitor, postOrder, 0, 0) + return matches +} + +// MatchSubset applies an ExprMatcher to a list of NavigableExpr values and their descendants, producing a +// subset of NavigableExpr values which match. +func MatchSubset(exprs []NavigableExpr, matcher ExprMatcher) []NavigableExpr { + matches := []NavigableExpr{} + navVisitor := &baseVisitor{ + visitExpr: func(e Expr) { + nav := e.(NavigableExpr) + if matcher(nav) { + matches = append(matches, nav) + } + }, + } + for _, expr := range exprs { + visit(expr, navVisitor, postOrder, 0, 1) + } + return matches +} + +// Visitor defines an object for visiting Expr and EntryExpr nodes within an expression graph. +type Visitor interface { + // VisitExpr visits the input expression. + VisitExpr(Expr) + + // VisitEntryExpr visits the input entry expression, i.e. a struct field or map entry. + VisitEntryExpr(EntryExpr) +} + +type baseVisitor struct { + visitExpr func(Expr) + visitEntryExpr func(EntryExpr) +} + +// VisitExpr visits the Expr if the internal expr visitor has been configured. +func (v *baseVisitor) VisitExpr(e Expr) { + if v.visitExpr != nil { + v.visitExpr(e) + } +} + +// VisitEntryExpr visits the entry if the internal expr entry visitor has been configured. +func (v *baseVisitor) VisitEntryExpr(e EntryExpr) { + if v.visitEntryExpr != nil { + v.visitEntryExpr(e) + } +} + +// NewExprVisitor creates a visitor which only visits expression nodes. +func NewExprVisitor(v func(Expr)) Visitor { + return &baseVisitor{ + visitExpr: v, + visitEntryExpr: nil, + } +} + +// PostOrderVisit walks the expression graph and calls the visitor in post-order (bottom-up). +func PostOrderVisit(expr Expr, visitor Visitor) { + visit(expr, visitor, postOrder, 0, 0) +} + +// PreOrderVisit walks the expression graph and calls the visitor in pre-order (top-down). +func PreOrderVisit(expr Expr, visitor Visitor) { + visit(expr, visitor, preOrder, 0, 0) +} + +type visitOrder int + +const ( + preOrder = iota + 1 + postOrder +) + +// TODO: consider exposing a way to configure a limit for the max visit depth. +// It's possible that we could want to configure this on the NewExprVisitor() +// and through MatchDescendants() / MaxID().
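// Editorial sketch, not part of the vendored file: composing the matchers and
// visitors defined above. `checked` stands in for a parsed or type-checked
// *ast.AST; the package and helper names are illustrative assumptions.
package main

import "github.com/google/cel-go/common/ast"

func inspect(checked *ast.AST) {
	root := ast.NavigateAST(checked)
	// Collect every equality call anywhere in the expression graph (post-order).
	for _, match := range ast.MatchDescendants(root, ast.FunctionMatcher("_==_")) {
		_ = match.AsCall().Args() // inspect the matched call's arguments
	}
	// Walk the whole tree bottom-up; children are visited before their parents.
	ast.PostOrderVisit(root, ast.NewExprVisitor(func(e ast.Expr) {
		_ = e.ID()
	}))
}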
+func visit(expr Expr, visitor Visitor, order visitOrder, depth, maxDepth int) { + if maxDepth > 0 && depth == maxDepth { + return + } + if order == preOrder { + visitor.VisitExpr(expr) + } + switch expr.Kind() { + case CallKind: + c := expr.AsCall() + if c.IsMemberFunction() { + visit(c.Target(), visitor, order, depth+1, maxDepth) + } + for _, arg := range c.Args() { + visit(arg, visitor, order, depth+1, maxDepth) + } + case ComprehensionKind: + c := expr.AsComprehension() + visit(c.IterRange(), visitor, order, depth+1, maxDepth) + visit(c.AccuInit(), visitor, order, depth+1, maxDepth) + visit(c.LoopCondition(), visitor, order, depth+1, maxDepth) + visit(c.LoopStep(), visitor, order, depth+1, maxDepth) + visit(c.Result(), visitor, order, depth+1, maxDepth) + case ListKind: + l := expr.AsList() + for _, elem := range l.Elements() { + visit(elem, visitor, order, depth+1, maxDepth) + } + case MapKind: + m := expr.AsMap() + for _, e := range m.Entries() { + if order == preOrder { + visitor.VisitEntryExpr(e) + } + entry := e.AsMapEntry() + visit(entry.Key(), visitor, order, depth+1, maxDepth) + visit(entry.Value(), visitor, order, depth+1, maxDepth) + if order == postOrder { + visitor.VisitEntryExpr(e) + } + } + case SelectKind: + visit(expr.AsSelect().Operand(), visitor, order, depth+1, maxDepth) + case StructKind: + s := expr.AsStruct() + for _, f := range s.Fields() { + visitor.VisitEntryExpr(f) + visit(f.AsStructField().Value(), visitor, order, depth+1, maxDepth) + } + } + if order == postOrder { + visitor.VisitExpr(expr) + } +} + +func matchIsConstantValue(e NavigableExpr) bool { + if e.Kind() == LiteralKind { + return true + } + if e.Kind() == StructKind || e.Kind() == MapKind || e.Kind() == ListKind { + for _, child := range e.Children() { + if !matchIsConstantValue(child) { + return false + } + } + return true + } + return false +} + +func newNavigableExpr(ast *AST, parent NavigableExpr, expr Expr, depth int) NavigableExpr { + // Reduce navigable expression nesting by unwrapping the embedded Expr value. 
+ if nav, ok := expr.(*navigableExprImpl); ok { + expr = nav.Expr + } + nav := &navigableExprImpl{ + Expr: expr, + depth: depth, + ast: ast, + parent: parent, + createChildren: getChildFactory(expr), + } + return nav +} + +type navigableExprImpl struct { + Expr + depth int + ast *AST + parent NavigableExpr + createChildren childFactory +} + +func (nav *navigableExprImpl) Parent() (NavigableExpr, bool) { + if nav.parent != nil { + return nav.parent, true + } + return nil, false +} + +func (nav *navigableExprImpl) ID() int64 { + return nav.Expr.ID() +} + +func (nav *navigableExprImpl) Kind() ExprKind { + return nav.Expr.Kind() +} + +func (nav *navigableExprImpl) Type() *types.Type { + return nav.ast.GetType(nav.ID()) +} + +func (nav *navigableExprImpl) Children() []NavigableExpr { + return nav.createChildren(nav) +} + +func (nav *navigableExprImpl) Depth() int { + return nav.depth +} + +func (nav *navigableExprImpl) AsCall() CallExpr { + return navigableCallImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsComprehension() ComprehensionExpr { + return navigableComprehensionImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsIdent() string { + return nav.Expr.AsIdent() +} + +func (nav *navigableExprImpl) AsList() ListExpr { + return navigableListImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsLiteral() ref.Val { + return nav.Expr.AsLiteral() +} + +func (nav *navigableExprImpl) AsMap() MapExpr { + return navigableMapImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsSelect() SelectExpr { + return navigableSelectImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) AsStruct() StructExpr { + return navigableStructImpl{navigableExprImpl: nav} +} + +func (nav *navigableExprImpl) createChild(e Expr) NavigableExpr { + return newNavigableExpr(nav.ast, nav, e, nav.depth+1) +} + +func (nav *navigableExprImpl) isExpr() {} + +type navigableCallImpl struct { + *navigableExprImpl +} + +func (call navigableCallImpl) FunctionName() string { + return call.Expr.AsCall().FunctionName() +} + +func (call navigableCallImpl) IsMemberFunction() bool { + return call.Expr.AsCall().IsMemberFunction() +} + +func (call navigableCallImpl) Target() Expr { + t := call.Expr.AsCall().Target() + if t != nil { + return call.createChild(t) + } + return nil +} + +func (call navigableCallImpl) Args() []Expr { + args := call.Expr.AsCall().Args() + navArgs := make([]Expr, len(args)) + for i, a := range args { + navArgs[i] = call.createChild(a) + } + return navArgs +} + +type navigableComprehensionImpl struct { + *navigableExprImpl +} + +func (comp navigableComprehensionImpl) IterRange() Expr { + return comp.createChild(comp.Expr.AsComprehension().IterRange()) +} + +func (comp navigableComprehensionImpl) IterVar() string { + return comp.Expr.AsComprehension().IterVar() +} + +func (comp navigableComprehensionImpl) AccuVar() string { + return comp.Expr.AsComprehension().AccuVar() +} + +func (comp navigableComprehensionImpl) AccuInit() Expr { + return comp.createChild(comp.Expr.AsComprehension().AccuInit()) +} + +func (comp navigableComprehensionImpl) LoopCondition() Expr { + return comp.createChild(comp.Expr.AsComprehension().LoopCondition()) +} + +func (comp navigableComprehensionImpl) LoopStep() Expr { + return comp.createChild(comp.Expr.AsComprehension().LoopStep()) +} + +func (comp navigableComprehensionImpl) Result() Expr { + return comp.createChild(comp.Expr.AsComprehension().Result()) +} + +type navigableListImpl struct { + *navigableExprImpl +} + +func 
(l navigableListImpl) Elements() []Expr { + pbElems := l.Expr.AsList().Elements() + elems := make([]Expr, len(pbElems)) + for i := 0; i < len(pbElems); i++ { + elems[i] = l.createChild(pbElems[i]) + } + return elems +} + +func (l navigableListImpl) IsOptional(index int32) bool { + return l.Expr.AsList().IsOptional(index) +} + +func (l navigableListImpl) OptionalIndices() []int32 { + return l.Expr.AsList().OptionalIndices() +} + +func (l navigableListImpl) Size() int { + return l.Expr.AsList().Size() +} + +type navigableMapImpl struct { + *navigableExprImpl +} + +func (m navigableMapImpl) Entries() []EntryExpr { + mapExpr := m.Expr.AsMap() + entries := make([]EntryExpr, len(mapExpr.Entries())) + for i, e := range mapExpr.Entries() { + entry := e.AsMapEntry() + entries[i] = &entryExpr{ + id: e.ID(), + entryExprKindCase: navigableEntryImpl{ + key: m.createChild(entry.Key()), + val: m.createChild(entry.Value()), + isOpt: entry.IsOptional(), + }, + } + } + return entries +} + +func (m navigableMapImpl) Size() int { + return m.Expr.AsMap().Size() +} + +type navigableEntryImpl struct { + key NavigableExpr + val NavigableExpr + isOpt bool +} + +func (e navigableEntryImpl) Kind() EntryExprKind { + return MapEntryKind +} + +func (e navigableEntryImpl) Key() Expr { + return e.key +} + +func (e navigableEntryImpl) Value() Expr { + return e.val +} + +func (e navigableEntryImpl) IsOptional() bool { + return e.isOpt +} + +func (e navigableEntryImpl) renumberIDs(IDGenerator) {} + +func (e navigableEntryImpl) isEntryExpr() {} + +type navigableSelectImpl struct { + *navigableExprImpl +} + +func (sel navigableSelectImpl) FieldName() string { + return sel.Expr.AsSelect().FieldName() +} + +func (sel navigableSelectImpl) IsTestOnly() bool { + return sel.Expr.AsSelect().IsTestOnly() +} + +func (sel navigableSelectImpl) Operand() Expr { + return sel.createChild(sel.Expr.AsSelect().Operand()) +} + +type navigableStructImpl struct { + *navigableExprImpl +} + +func (s navigableStructImpl) TypeName() string { + return s.Expr.AsStruct().TypeName() +} + +func (s navigableStructImpl) Fields() []EntryExpr { + fieldInits := s.Expr.AsStruct().Fields() + fields := make([]EntryExpr, len(fieldInits)) + for i, f := range fieldInits { + field := f.AsStructField() + fields[i] = &entryExpr{ + id: f.ID(), + entryExprKindCase: navigableFieldImpl{ + name: field.Name(), + val: s.createChild(field.Value()), + isOpt: field.IsOptional(), + }, + } + } + return fields +} + +type navigableFieldImpl struct { + name string + val NavigableExpr + isOpt bool +} + +func (f navigableFieldImpl) Kind() EntryExprKind { + return StructFieldKind +} + +func (f navigableFieldImpl) Name() string { + return f.name +} + +func (f navigableFieldImpl) Value() Expr { + return f.val +} + +func (f navigableFieldImpl) IsOptional() bool { + return f.isOpt +} + +func (f navigableFieldImpl) renumberIDs(IDGenerator) {} + +func (f navigableFieldImpl) isEntryExpr() {} + +func getChildFactory(expr Expr) childFactory { + if expr == nil { + return noopFactory + } + switch expr.Kind() { + case LiteralKind: + return noopFactory + case IdentKind: + return noopFactory + case SelectKind: + return selectFactory + case CallKind: + return callArgFactory + case ListKind: + return listElemFactory + case MapKind: + return mapEntryFactory + case StructKind: + return structEntryFactory + case ComprehensionKind: + return comprehensionFactory + default: + return noopFactory + } +} + +type childFactory func(*navigableExprImpl) []NavigableExpr + +func noopFactory(*navigableExprImpl) 
[]NavigableExpr { + return nil +} + +func selectFactory(nav *navigableExprImpl) []NavigableExpr { + return []NavigableExpr{nav.createChild(nav.AsSelect().Operand())} +} + +func callArgFactory(nav *navigableExprImpl) []NavigableExpr { + call := nav.Expr.AsCall() + argCount := len(call.Args()) + if call.IsMemberFunction() { + argCount++ + } + navExprs := make([]NavigableExpr, argCount) + i := 0 + if call.IsMemberFunction() { + navExprs[i] = nav.createChild(call.Target()) + i++ + } + for _, arg := range call.Args() { + navExprs[i] = nav.createChild(arg) + i++ + } + return navExprs +} + +func listElemFactory(nav *navigableExprImpl) []NavigableExpr { + l := nav.Expr.AsList() + navExprs := make([]NavigableExpr, len(l.Elements())) + for i, e := range l.Elements() { + navExprs[i] = nav.createChild(e) + } + return navExprs +} + +func structEntryFactory(nav *navigableExprImpl) []NavigableExpr { + s := nav.Expr.AsStruct() + entries := make([]NavigableExpr, len(s.Fields())) + for i, e := range s.Fields() { + f := e.AsStructField() + entries[i] = nav.createChild(f.Value()) + } + return entries +} + +func mapEntryFactory(nav *navigableExprImpl) []NavigableExpr { + m := nav.Expr.AsMap() + entries := make([]NavigableExpr, len(m.Entries())*2) + j := 0 + for _, e := range m.Entries() { + mapEntry := e.AsMapEntry() + entries[j] = nav.createChild(mapEntry.Key()) + entries[j+1] = nav.createChild(mapEntry.Value()) + j += 2 + } + return entries +} + +func comprehensionFactory(nav *navigableExprImpl) []NavigableExpr { + compre := nav.Expr.AsComprehension() + return []NavigableExpr{ + nav.createChild(compre.IterRange()), + nav.createChild(compre.AccuInit()), + nav.createChild(compre.LoopCondition()), + nav.createChild(compre.LoopStep()), + nav.createChild(compre.Result()), + } +} diff --git a/vendor/github.com/google/cel-go/common/containers/BUILD.bazel b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel new file mode 100644 index 00000000..81197f06 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/containers/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "container.go", + ], + importpath = "github.com/google/cel-go/common/containers", + deps = [ + "//common/ast:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "container_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//common/ast:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/containers/container.go b/vendor/github.com/google/cel-go/common/containers/container.go new file mode 100644 index 00000000..52153d4c --- /dev/null +++ b/vendor/github.com/google/cel-go/common/containers/container.go @@ -0,0 +1,316 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package containers defines types and functions for resolving qualified names within a namespace +// or type provided to CEL. +package containers + +import ( + "fmt" + "strings" + + "github.com/google/cel-go/common/ast" +) + +var ( + // DefaultContainer has an empty container name. + DefaultContainer *Container = nil + + // Empty map to search for aliases when needed. + noAliases = make(map[string]string) +) + +// NewContainer creates a new Container from the given set of ContainerOptions. +func NewContainer(opts ...ContainerOption) (*Container, error) { + var c *Container + var err error + for _, opt := range opts { + c, err = opt(c) + if err != nil { + return nil, err + } + } + return c, nil +} + +// Container holds a reference to an optional qualified container name and set of aliases. +// +// The program container can be used to simplify variable, function, and type specification within +// CEL programs and behaves more or less like a C++ namespace. See ResolveCandidateNames for more +// details. +type Container struct { + name string + aliases map[string]string +} + +// Extend creates a new Container with the existing settings and applies a series of +// ContainerOptions to further configure the new container. +func (c *Container) Extend(opts ...ContainerOption) (*Container, error) { + if c == nil { + return NewContainer(opts...) + } + // Copy the name and aliases of the existing container. + ext := &Container{name: c.Name()} + if len(c.aliasSet()) > 0 { + aliasSet := make(map[string]string, len(c.aliasSet())) + for k, v := range c.aliasSet() { + aliasSet[k] = v + } + ext.aliases = aliasSet + } + // Apply the new options to the container. + var err error + for _, opt := range opts { + ext, err = opt(ext) + if err != nil { + return nil, err + } + } + return ext, nil +} + +// Name returns the fully-qualified name of the container. +// +// The name may conceptually be a namespace, package, or type. +func (c *Container) Name() string { + if c == nil { + return "" + } + return c.name +} + +// ResolveCandidateNames returns the candidate names of namespaced identifiers in C++ resolution +// order. +// +// Names which shadow other names are returned first. If a name includes a leading dot ('.'), +// the name is treated as an absolute identifier which cannot be shadowed. +// +// Given a container name a.b.c.M.N and a type name R.s, this will deliver in order: +// +// a.b.c.M.N.R.s +// a.b.c.M.R.s +// a.b.c.R.s +// a.b.R.s +// a.R.s +// R.s +// +// If aliases or abbreviations are configured for the container, then alias names will take +// precedence over containerized names. +func (c *Container) ResolveCandidateNames(name string) []string { + if strings.HasPrefix(name, ".") { + qn := name[1:] + alias, isAlias := c.findAlias(qn) + if isAlias { + return []string{alias} + } + return []string{qn} + } + alias, isAlias := c.findAlias(name) + if isAlias { + return []string{alias} + } + if c.Name() == "" { + return []string{name} + } + nextCont := c.Name() + candidates := []string{nextCont + "." + name} + for i := strings.LastIndex(nextCont, "."); i >= 0; i = strings.LastIndex(nextCont, ".") { + nextCont = nextCont[:i] + candidates = append(candidates, nextCont+"."+name) + } + return append(candidates, name) +} + +// aliasSet returns the alias to fully-qualified name mapping stored in the container.
+func (c *Container) aliasSet() map[string]string { + if c == nil || c.aliases == nil { + return noAliases + } + return c.aliases +} + +// findAlias takes a name as input and returns an alias expansion if one exists. +// +// If the name is qualified, the first component of the qualified name is checked against known +// aliases. Any alias that is found in a qualified name is expanded in the result: +// +// alias: R -> my.alias.R +// name: R.S.T +// output: my.alias.R.S.T +// +// Note, the name must not have a leading dot. +func (c *Container) findAlias(name string) (string, bool) { + // If an alias exists for the name, ensure it is searched last. + simple := name + qualifier := "" + dot := strings.Index(name, ".") + if dot >= 0 { + simple = name[0:dot] + qualifier = name[dot:] + } + alias, found := c.aliasSet()[simple] + if !found { + return "", false + } + return alias + qualifier, true +} + +// ContainerOption specifies a functional configuration option for a Container. +// +// Note, ContainerOption implementations must be able to handle nil container inputs. +type ContainerOption func(*Container) (*Container, error) + +// Abbrevs configures a set of simple names as abbreviations for fully-qualified names. +// +// An abbreviation (abbrev for short) is a simple name that expands to a fully-qualified name. +// Abbreviations can be useful when working with variables, functions, and especially types from +// multiple namespaces: +// +// // CEL object construction +// qual.pkg.version.ObjTypeName{ +// field: alt.container.ver.FieldTypeName{value: ...} +// } +// +// Only one of the qualified names above may be used as the CEL container, so at least one of these +// references must be a long qualified name within an otherwise short CEL program. Using the +// following abbreviations, the program becomes much simpler: +// +// // CEL Go option +// Abbrevs("qual.pkg.version.ObjTypeName", "alt.container.ver.FieldTypeName") +// // Simplified Object construction +// ObjTypeName{field: FieldTypeName{value: ...}} +// +// There are a few rules for the qualified names and the simple abbreviations generated from them: +// - Qualified names must be dot-delimited, e.g. `package.subpkg.name`. +// - The last element in the qualified name is the abbreviation. +// - Abbreviations must not collide with each other. +// - The abbreviation must not collide with unqualified names in use. +// +// Abbreviations are distinct from container-based references in the following important ways: +// - Abbreviations must expand to a fully-qualified name. +// - Expanded abbreviations do not participate in namespace resolution. +// - Abbreviation expansion is done instead of the container search for a matching identifier. +// - Containers follow C++ namespace resolution rules with searches from the most qualified name +// to the least qualified name. +// - Container references within the CEL program may be relative, and are resolved to fully +// qualified names at either type-check time or program plan time, whichever comes first. +// +// If there is ever a case where an identifier could be in both the container and as an +// abbreviation, the abbreviation wins as this will ensure that the meaning of a program is +// preserved between compilations even as the container evolves.
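// Editorial sketch, not part of the vendored file: the resolution order and
// abbreviation behavior documented above, made concrete. The package and
// helper names are illustrative assumptions.
package main

import "github.com/google/cel-go/common/containers"

func resolve() {
	c, err := containers.NewContainer(
		containers.Name("a.b.c.M.N"),
		containers.Abbrevs("alt.container.ver.FieldTypeName"),
	)
	if err != nil {
		panic(err)
	}
	// Candidates run from most- to least-qualified:
	// [a.b.c.M.N.R.s a.b.c.M.R.s a.b.c.R.s a.b.R.s a.R.s R.s]
	_ = c.ResolveCandidateNames("R.s")
	// The abbreviation expands directly and bypasses container resolution:
	// [alt.container.ver.FieldTypeName]
	_ = c.ResolveCandidateNames("FieldTypeName")
}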
+func Abbrevs(qualifiedNames ...string) ContainerOption { + return func(c *Container) (*Container, error) { + for _, qn := range qualifiedNames { + ind := strings.LastIndex(qn, ".") + if ind <= 0 || ind >= len(qn)-1 { + return nil, fmt.Errorf( + "invalid qualified name: %s, wanted name of the form 'qualified.name'", qn) + } + alias := qn[ind+1:] + var err error + c, err = aliasAs("abbreviation", qn, alias)(c) + if err != nil { + return nil, err + } + } + return c, nil + } +} + +// Alias associates a fully-qualified name with a user-defined alias. +// +// In general, Abbrevs is preferred to Alias since the names generated from the Abbrevs option +// are more easily traced back to source code. The Alias option is useful for propagating alias +// configuration from one Container instance to another, and may also be useful for remapping +// poorly chosen protobuf message / package names. +// +// Note: all of the rules that apply to Abbrevs also apply to Alias. +func Alias(qualifiedName, alias string) ContainerOption { + return aliasAs("alias", qualifiedName, alias) +} + +func aliasAs(kind, qualifiedName, alias string) ContainerOption { + return func(c *Container) (*Container, error) { + if len(alias) == 0 || strings.Contains(alias, ".") { + return nil, fmt.Errorf( + "%s must be non-empty and simple (not qualified): %s=%s", kind, kind, alias) + } + + if qualifiedName[0:1] == "." { + return nil, fmt.Errorf("qualified name must not begin with a leading '.': %s", + qualifiedName) + } + ind := strings.LastIndex(qualifiedName, ".") + if ind <= 0 || ind == len(qualifiedName)-1 { + return nil, fmt.Errorf("%s must refer to a valid qualified name: %s", + kind, qualifiedName) + } + aliasRef, found := c.aliasSet()[alias] + if found { + return nil, fmt.Errorf( + "%s collides with existing reference: name=%s, %s=%s, existing=%s", + kind, qualifiedName, kind, alias, aliasRef) + } + if strings.HasPrefix(c.Name(), alias+".") || c.Name() == alias { + return nil, fmt.Errorf( + "%s collides with container name: name=%s, %s=%s, container=%s", + kind, qualifiedName, kind, alias, c.Name()) + } + if c == nil { + c = &Container{} + } + if c.aliases == nil { + c.aliases = make(map[string]string) + } + c.aliases[alias] = qualifiedName + return c, nil + } +} + +// Name sets the fully-qualified name of the Container. +func Name(name string) ContainerOption { + return func(c *Container) (*Container, error) { + if len(name) > 0 && name[0:1] == "." { + return nil, fmt.Errorf("container name must not contain a leading '.': %s", name) + } + if c.Name() == name { + return c, nil + } + if c == nil { + return &Container{name: name}, nil + } + c.name = name + return c, nil + } +} + +// ToQualifiedName converts an expression AST into a qualified name if possible, with a boolean +// 'found' value that indicates if the conversion is successful. +func ToQualifiedName(e ast.Expr) (string, bool) { + switch e.Kind() { + case ast.IdentKind: + id := e.AsIdent() + return id, true + case ast.SelectKind: + sel := e.AsSelect() + // Test only expressions are not valid as qualified names. + if sel.IsTestOnly() { + return "", false + } + if qual, found := ToQualifiedName(sel.Operand()); found { + return qual + "." 
+ sel.FieldName(), true + } + } + return "", false +} diff --git a/vendor/github.com/google/cel-go/common/cost.go b/vendor/github.com/google/cel-go/common/cost.go new file mode 100644 index 00000000..5e24bd0f --- /dev/null +++ b/vendor/github.com/google/cel-go/common/cost.go @@ -0,0 +1,40 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +const ( + // SelectAndIdentCost is the cost of an operation that accesses an identifier or performs a select. + SelectAndIdentCost = 1 + + // ConstCost is the cost of an operation that accesses a constant. + ConstCost = 0 + + // ListCreateBaseCost is the base cost of any operation that creates a new list. + ListCreateBaseCost = 10 + + // MapCreateBaseCost is the base cost of any operation that creates a new map. + MapCreateBaseCost = 30 + + // StructCreateBaseCost is the base cost of any operation that creates a new struct. + StructCreateBaseCost = 40 + + // StringTraversalCostFactor is multiplied by the length of a string when computing the cost of traversing the entire + // string once. + StringTraversalCostFactor = 0.1 + + // RegexStringLengthCostFactor is multiplied by the length of a regex string pattern when computing the cost of + // applying the regex to a string of unit cost. + RegexStringLengthCostFactor = 0.25 +) diff --git a/vendor/github.com/google/cel-go/common/debug/BUILD.bazel b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel new file mode 100644 index 00000000..724ed340 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/debug/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "debug.go", + ], + importpath = "github.com/google/cel-go/common/debug", + deps = [ + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/debug/debug.go b/vendor/github.com/google/cel-go/common/debug/debug.go new file mode 100644 index 00000000..e4c01ac6 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/debug/debug.go @@ -0,0 +1,309 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
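// Editorial sketch, not part of the vendored files: containers.ToQualifiedName,
// defined earlier in this patch, flattens an ident/select chain back into a
// dotted name. The package name, helper name, and IDs are illustrative
// assumptions.
package main

import (
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/containers"
)

func qualifiedName() {
	fac := ast.NewExprFactory()
	// a.b.c as an expression tree: Select(Select(Ident("a"), "b"), "c").
	abc := fac.NewSelect(3, fac.NewSelect(2, fac.NewIdent(1, "a"), "b"), "c")
	name, ok := containers.ToQualifiedName(abc)
	_, _ = name, ok // "a.b.c", true
	// A macro-generated presence test (IsTestOnly) yields "", false instead.
}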
+ +// Package debug provides tools to print a parsed expression graph and +// adorn each expression element with additional metadata. +package debug + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// Adorner returns debug metadata that will be tacked on to the string +// representation of an expression. +type Adorner interface { + // GetMetadata for the input context. + GetMetadata(ctx any) string +} + +// Writer manages writing expressions to an internal string. +type Writer interface { + fmt.Stringer + + // Buffer pushes an expression into an internal queue of expressions to + // write to a string. + Buffer(e ast.Expr) +} + +type emptyDebugAdorner struct { +} + +var emptyAdorner Adorner = &emptyDebugAdorner{} + +func (a *emptyDebugAdorner) GetMetadata(e any) string { + return "" +} + +// ToDebugString gives the unadorned string representation of the Expr. +func ToDebugString(e ast.Expr) string { + return ToAdornedDebugString(e, emptyAdorner) +} + +// ToAdornedDebugString gives the adorned string representation of the Expr. +func ToAdornedDebugString(e ast.Expr, adorner Adorner) string { + w := newDebugWriter(adorner) + w.Buffer(e) + return w.String() +} + +// debugWriter is used to print out pretty-printed debug strings. +type debugWriter struct { + adorner Adorner + buffer bytes.Buffer + indent int + lineStart bool +} + +func newDebugWriter(a Adorner) *debugWriter { + return &debugWriter{ + adorner: a, + indent: 0, + lineStart: true, + } +} + +func (w *debugWriter) Buffer(e ast.Expr) { + if e == nil { + return + } + switch e.Kind() { + case ast.LiteralKind: + w.append(formatLiteral(e.AsLiteral())) + case ast.IdentKind: + w.append(e.AsIdent()) + case ast.SelectKind: + w.appendSelect(e.AsSelect()) + case ast.CallKind: + w.appendCall(e.AsCall()) + case ast.ListKind: + w.appendList(e.AsList()) + case ast.MapKind: + w.appendMap(e.AsMap()) + case ast.StructKind: + w.appendStruct(e.AsStruct()) + case ast.ComprehensionKind: + w.appendComprehension(e.AsComprehension()) + } + w.adorn(e) +} + +func (w *debugWriter) appendSelect(sel ast.SelectExpr) { + w.Buffer(sel.Operand()) + w.append(".") + w.append(sel.FieldName()) + if sel.IsTestOnly() { + w.append("~test-only~") + } +} + +func (w *debugWriter) appendCall(call ast.CallExpr) { + if call.IsMemberFunction() { + w.Buffer(call.Target()) + w.append(".") + } + w.append(call.FunctionName()) + w.append("(") + if len(call.Args()) > 0 { + w.addIndent() + w.appendLine() + for i, arg := range call.Args() { + if i > 0 { + w.append(",") + w.appendLine() + } + w.Buffer(arg) + } + w.removeIndent() + w.appendLine() + } + w.append(")") +} + +func (w *debugWriter) appendList(list ast.ListExpr) { + w.append("[") + if len(list.Elements()) > 0 { + w.appendLine() + w.addIndent() + for i, elem := range list.Elements() { + if i > 0 { + w.append(",") + w.appendLine() + } + w.Buffer(elem) + } + w.removeIndent() + w.appendLine() + } + w.append("]") +} + +func (w *debugWriter) appendStruct(obj ast.StructExpr) { + w.append(obj.TypeName()) + w.append("{") + if len(obj.Fields()) > 0 { + w.appendLine() + w.addIndent() + for i, f := range obj.Fields() { + field := f.AsStructField() + if i > 0 { + w.append(",") + w.appendLine() + } + if field.IsOptional() { + w.append("?") + } + w.append(field.Name()) + w.append(":") + w.Buffer(field.Value()) + w.adorn(f) + } + w.removeIndent() + w.appendLine() + } + w.append("}") +} + +func (w 
*debugWriter) appendMap(m ast.MapExpr) { + w.append("{") + if m.Size() > 0 { + w.appendLine() + w.addIndent() + for i, e := range m.Entries() { + entry := e.AsMapEntry() + if i > 0 { + w.append(",") + w.appendLine() + } + if entry.IsOptional() { + w.append("?") + } + w.Buffer(entry.Key()) + w.append(":") + w.Buffer(entry.Value()) + w.adorn(e) + } + w.removeIndent() + w.appendLine() + } + w.append("}") +} + +func (w *debugWriter) appendComprehension(comprehension ast.ComprehensionExpr) { + w.append("__comprehension__(") + w.addIndent() + w.appendLine() + w.append("// Variable") + w.appendLine() + w.append(comprehension.IterVar()) + w.append(",") + w.appendLine() + w.append("// Target") + w.appendLine() + w.Buffer(comprehension.IterRange()) + w.append(",") + w.appendLine() + w.append("// Accumulator") + w.appendLine() + w.append(comprehension.AccuVar()) + w.append(",") + w.appendLine() + w.append("// Init") + w.appendLine() + w.Buffer(comprehension.AccuInit()) + w.append(",") + w.appendLine() + w.append("// LoopCondition") + w.appendLine() + w.Buffer(comprehension.LoopCondition()) + w.append(",") + w.appendLine() + w.append("// LoopStep") + w.appendLine() + w.Buffer(comprehension.LoopStep()) + w.append(",") + w.appendLine() + w.append("// Result") + w.appendLine() + w.Buffer(comprehension.Result()) + w.append(")") + w.removeIndent() +} + +func formatLiteral(c ref.Val) string { + switch v := c.(type) { + case types.Bool: + return fmt.Sprintf("%t", v) + case types.Bytes: + return fmt.Sprintf("b\"%s\"", string(v)) + case types.Double: + return fmt.Sprintf("%v", float64(v)) + case types.Int: + return fmt.Sprintf("%d", int64(v)) + case types.String: + return strconv.Quote(string(v)) + case types.Uint: + return fmt.Sprintf("%du", uint64(v)) + case types.Null: + return "null" + default: + panic("Unknown constant type") + } +} + +func (w *debugWriter) append(s string) { + w.doIndent() + w.buffer.WriteString(s) +} + +func (w *debugWriter) appendFormat(f string, args ...any) { + w.append(fmt.Sprintf(f, args...)) +} + +func (w *debugWriter) doIndent() { + if w.lineStart { + w.lineStart = false + w.buffer.WriteString(strings.Repeat(" ", w.indent)) + } +} + +func (w *debugWriter) adorn(e any) { + w.append(w.adorner.GetMetadata(e)) +} + +func (w *debugWriter) appendLine() { + w.buffer.WriteString("\n") + w.lineStart = true +} + +func (w *debugWriter) addIndent() { + w.indent++ +} + +func (w *debugWriter) removeIndent() { + w.indent-- + if w.indent < 0 { + panic("negative indent") + } +} + +func (w *debugWriter) String() string { + return w.buffer.String() +} diff --git a/vendor/github.com/google/cel-go/common/decls/BUILD.bazel b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel new file mode 100644 index 00000000..17791dce --- /dev/null +++ b/vendor/github.com/google/cel-go/common/decls/BUILD.bazel @@ -0,0 +1,39 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "decls.go", + ], + importpath = "github.com/google/cel-go/common/decls", + deps = [ + "//checker/decls:go_default_library", + "//common/functions:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "decls_test.go", + ], + embed = 
[":go_default_library"], + deps = [ + "//checker/decls:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/decls/decls.go b/vendor/github.com/google/cel-go/common/decls/decls.go new file mode 100644 index 00000000..0a42f81d --- /dev/null +++ b/vendor/github.com/google/cel-go/common/decls/decls.go @@ -0,0 +1,846 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package decls contains function and variable declaration structs and helper methods. +package decls + +import ( + "fmt" + "strings" + + chkdecls "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" +) + +// NewFunction creates a new function declaration with a set of function options to configure overloads +// and function definitions (implementations). +// +// Functions are checked for name collisions and singleton redefinition. +func NewFunction(name string, opts ...FunctionOpt) (*FunctionDecl, error) { + fn := &FunctionDecl{ + name: name, + overloads: map[string]*OverloadDecl{}, + overloadOrdinals: []string{}, + } + var err error + for _, opt := range opts { + fn, err = opt(fn) + if err != nil { + return nil, err + } + } + if len(fn.overloads) == 0 { + return nil, fmt.Errorf("function %s must have at least one overload", name) + } + return fn, nil +} + +// FunctionDecl defines a function name, overload set, and optionally a singleton definition for all +// overload instances. +type FunctionDecl struct { + name string + + // overloads associated with the function name. + overloads map[string]*OverloadDecl + + // singleton implementation of the function for all overloads. + // + // If this option is set, an error will occur if any overloads specify a per-overload implementation + // or if another function with the same name attempts to redefine the singleton. + singleton *functions.Overload + + // disableTypeGuards is a performance optimization to disable detailed runtime type checks which could + // add overhead on common operations. Setting this option true leaves error checks and argument checks + // intact. + disableTypeGuards bool + + // state indicates that the binding should be provided as a declaration, as a runtime binding, or both. + state declarationState + + // overloadOrdinals indicates the order in which the overload was declared. 
+	overloadOrdinals []string
+}
+
+type declarationState int
+
+const (
+	declarationStateUnset declarationState = iota
+	declarationDisabled
+	declarationEnabled
+)
+
+// Name returns the function name in human-readable terms, e.g. 'contains' or 'math.least'
+func (f *FunctionDecl) Name() string {
+	if f == nil {
+		return ""
+	}
+	return f.name
+}
+
+// IsDeclarationDisabled indicates that the function implementation should be added to the dispatcher, but the
+// declaration should not be exposed for use in expressions.
+func (f *FunctionDecl) IsDeclarationDisabled() bool {
+	return f.state == declarationDisabled
+}
+
+// Merge combines an existing function declaration with another.
+//
+// If a function is extended, by, say, adding new overloads to an existing function, then it is merged with the
+// prior definition of the function at which point its overloads must not collide with pre-existing overloads
+// and its bindings (singleton, or per-overload) must not conflict with previous definitions either.
+func (f *FunctionDecl) Merge(other *FunctionDecl) (*FunctionDecl, error) {
+	if f == other {
+		return f, nil
+	}
+	if f.Name() != other.Name() {
+		return nil, fmt.Errorf("cannot merge unrelated functions. %s and %s", f.Name(), other.Name())
+	}
+	merged := &FunctionDecl{
+		name:             f.Name(),
+		overloads:        make(map[string]*OverloadDecl, len(f.overloads)),
+		singleton:        f.singleton,
+		overloadOrdinals: make([]string, len(f.overloads)),
+		// if one function expects type-guards and the other does not, then
+		// type-guards must not be disabled.
+		disableTypeGuards: f.disableTypeGuards && other.disableTypeGuards,
+		// default to the current function's declaration state.
+		state: f.state,
+	}
+	// If the other state indicates that the declaration should be explicitly enabled or
+	// disabled, then update the merged state with the most recent value.
+	if other.state != declarationStateUnset {
+		merged.state = other.state
+	}
+	// baseline copy of the overloads and their ordinals
+	copy(merged.overloadOrdinals, f.overloadOrdinals)
+	for oID, o := range f.overloads {
+		merged.overloads[oID] = o
+	}
+	// overloads and their ordinals from the other declaration are merged in,
+	// preserving their declaration order.
+	for _, oID := range other.overloadOrdinals {
+		o := other.overloads[oID]
+		err := merged.AddOverload(o)
+		if err != nil {
+			return nil, fmt.Errorf("function declaration merge failed: %v", err)
+		}
+	}
+	if other.singleton != nil {
+		if merged.singleton != nil && merged.singleton != other.singleton {
+			return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name())
+		}
+		merged.singleton = other.singleton
+	}
+	return merged, nil
+}
+
+// AddOverload ensures that the new overload does not collide with an existing overload signature;
+// however, if the function signatures are identical, the implementation may be rewritten as it's
+// difficult to compare functions by object identity.
+func (f *FunctionDecl) AddOverload(overload *OverloadDecl) error {
+	if f == nil {
+		return fmt.Errorf("nil function cannot add overload: %s", overload.ID())
+	}
+	for oID, o := range f.overloads {
+		if oID != overload.ID() && o.SignatureOverlaps(overload) {
+			return fmt.Errorf("overload signature collision in function %s: %s collides with %s", f.Name(), oID, overload.ID())
+		}
+		if oID == overload.ID() {
+			if o.SignatureEquals(overload) && o.IsNonStrict() == overload.IsNonStrict() {
+				// Allow redefinition of an overload implementation so long as the signatures match.
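+				// Only replace the stored overload when the incoming one carries a
+				// binding; a declaration-only redefinition keeps the prior implementation.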
+ if overload.hasBinding() { + f.overloads[oID] = overload + } + return nil + } + return fmt.Errorf("overload redefinition in function. %s: %s has multiple definitions", f.Name(), oID) + } + } + f.overloadOrdinals = append(f.overloadOrdinals, overload.ID()) + f.overloads[overload.ID()] = overload + return nil +} + +// OverloadDecls returns the overload declarations in the order in which they were declared. +func (f *FunctionDecl) OverloadDecls() []*OverloadDecl { + if f == nil { + return []*OverloadDecl{} + } + overloads := make([]*OverloadDecl, 0, len(f.overloads)) + for _, oID := range f.overloadOrdinals { + overloads = append(overloads, f.overloads[oID]) + } + return overloads +} + +// Bindings produces a set of function bindings, if any are defined. +func (f *FunctionDecl) Bindings() ([]*functions.Overload, error) { + if f == nil { + return []*functions.Overload{}, nil + } + overloads := []*functions.Overload{} + nonStrict := false + for _, oID := range f.overloadOrdinals { + o := f.overloads[oID] + if o.hasBinding() { + overload := &functions.Overload{ + Operator: o.ID(), + Unary: o.guardedUnaryOp(f.Name(), f.disableTypeGuards), + Binary: o.guardedBinaryOp(f.Name(), f.disableTypeGuards), + Function: o.guardedFunctionOp(f.Name(), f.disableTypeGuards), + OperandTrait: o.OperandTrait(), + NonStrict: o.IsNonStrict(), + } + overloads = append(overloads, overload) + nonStrict = nonStrict || o.IsNonStrict() + } + } + if f.singleton != nil { + if len(overloads) != 0 { + return nil, fmt.Errorf("singleton function incompatible with specialized overloads: %s", f.Name()) + } + overloads = []*functions.Overload{ + { + Operator: f.Name(), + Unary: f.singleton.Unary, + Binary: f.singleton.Binary, + Function: f.singleton.Function, + OperandTrait: f.singleton.OperandTrait, + }, + } + // fall-through to return single overload case. + } + if len(overloads) == 0 { + return overloads, nil + } + // Single overload. Replicate an entry for it using the function name as well. + if len(overloads) == 1 { + if overloads[0].Operator == f.Name() { + return overloads, nil + } + return append(overloads, &functions.Overload{ + Operator: f.Name(), + Unary: overloads[0].Unary, + Binary: overloads[0].Binary, + Function: overloads[0].Function, + NonStrict: overloads[0].NonStrict, + OperandTrait: overloads[0].OperandTrait, + }), nil + } + // All of the defined overloads are wrapped into a top-level function which + // performs dynamic dispatch to the proper overload based on the argument types. + bindings := append([]*functions.Overload{}, overloads...) + funcDispatch := func(args ...ref.Val) ref.Val { + for _, oID := range f.overloadOrdinals { + o := f.overloads[oID] + // During dynamic dispatch over multiple functions, signature agreement checks + // are preserved in order to assist with the function resolution step. + switch len(args) { + case 1: + if o.unaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.unaryOp(args[0]) + } + case 2: + if o.binaryOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.binaryOp(args[0], args[1]) + } + } + if o.functionOp != nil && o.matchesRuntimeSignature( /* disableTypeGuards=*/ false, args...) { + return o.functionOp(args...) + } + // eventually this will fall through to the noSuchOverload below. + } + return MaybeNoSuchOverload(f.Name(), args...) 
+ } + function := &functions.Overload{ + Operator: f.Name(), + Function: funcDispatch, + NonStrict: nonStrict, + } + return append(bindings, function), nil +} + +// MaybeNoSuchOverload determines whether to propagate an error if one is provided as an argument, or +// to return an unknown set, or to produce a new error for a missing function signature. +func MaybeNoSuchOverload(funcName string, args ...ref.Val) ref.Val { + argTypes := make([]string, len(args)) + var unk *types.Unknown = nil + for i, arg := range args { + if types.IsError(arg) { + return arg + } + if types.IsUnknown(arg) { + unk = types.MergeUnknowns(arg.(*types.Unknown), unk) + } + argTypes[i] = arg.Type().TypeName() + } + if unk != nil { + return unk + } + signature := strings.Join(argTypes, ", ") + return types.NewErr("no such overload: %s(%s)", funcName, signature) +} + +// FunctionOpt defines a functional option for mutating a function declaration. +type FunctionOpt func(*FunctionDecl) (*FunctionDecl, error) + +// DisableTypeGuards disables automatically generated function invocation guards on direct overload calls. +// Type guards remain on during dynamic dispatch for parsed-only expressions. +func DisableTypeGuards(value bool) FunctionOpt { + return func(fn *FunctionDecl) (*FunctionDecl, error) { + fn.disableTypeGuards = value + return fn, nil + } +} + +// DisableDeclaration indicates that the function declaration should be disabled, but the runtime function +// binding should be provided. Marking a function as runtime-only is a safe way to manage deprecations +// of function declarations while still preserving the runtime behavior for previously compiled expressions. +func DisableDeclaration(value bool) FunctionOpt { + return func(fn *FunctionDecl) (*FunctionDecl, error) { + if value { + fn.state = declarationDisabled + } else { + fn.state = declarationEnabled + } + return fn, nil + } +} + +// SingletonUnaryBinding creates a singleton function definition to be used for all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonUnaryBinding(fn functions.UnaryOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Unary: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// SingletonBinaryBinding creates a singleton function definition to be used with all function overloads. +// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonBinaryBinding(fn functions.BinaryOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Binary: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// SingletonFunctionBinding creates a singleton function definition to be used with all function overloads. 
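+//
+// A minimal usage sketch (the "size" function and overload id are illustrative,
+// not definitions made by this package; traits refers to common/types/traits):
+//
+//	fn, err := NewFunction("size",
+//	    Overload("size_string", []*types.Type{types.StringType}, types.IntType),
+//	    SingletonFunctionBinding(func(args ...ref.Val) ref.Val {
+//	        return args[0].(traits.Sizer).Size()
+//	    }, traits.SizerType))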
+// +// Note, this approach works well if operand is expected to have a specific trait which it implements, +// e.g. traits.ContainerType. Otherwise, prefer per-overload function bindings. +func SingletonFunctionBinding(fn functions.FunctionOp, traits ...int) FunctionOpt { + trait := 0 + for _, t := range traits { + trait = trait | t + } + return func(f *FunctionDecl) (*FunctionDecl, error) { + if f.singleton != nil { + return nil, fmt.Errorf("function already has a singleton binding: %s", f.Name()) + } + f.singleton = &functions.Overload{ + Operator: f.Name(), + Function: fn, + OperandTrait: trait, + } + return f, nil + } +} + +// Overload defines a new global overload with an overload id, argument types, and result type. Through the +// use of OverloadOpt options, the overload may also be configured with a binding, an operand trait, and to +// be non-strict. +// +// Note: function bindings should be commonly configured with Overload instances whereas operand traits and +// strict-ness should be rare occurrences. +func Overload(overloadID string, + args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) FunctionOpt { + return newOverload(overloadID, false, args, resultType, opts...) +} + +// MemberOverload defines a new receiver-style overload (or member function) with an overload id, argument types, +// and result type. Through the use of OverloadOpt options, the overload may also be configured with a binding, +// an operand trait, and to be non-strict. +// +// Note: function bindings should be commonly configured with Overload instances whereas operand traits and +// strict-ness should be rare occurrences. +func MemberOverload(overloadID string, + args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) FunctionOpt { + return newOverload(overloadID, true, args, resultType, opts...) +} + +func newOverload(overloadID string, + memberFunction bool, args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) FunctionOpt { + return func(f *FunctionDecl) (*FunctionDecl, error) { + overload, err := newOverloadInternal(overloadID, memberFunction, args, resultType, opts...) + if err != nil { + return nil, err + } + err = f.AddOverload(overload) + if err != nil { + return nil, err + } + return f, nil + } +} + +func newOverloadInternal(overloadID string, + memberFunction bool, args []*types.Type, resultType *types.Type, + opts ...OverloadOpt) (*OverloadDecl, error) { + overload := &OverloadDecl{ + id: overloadID, + argTypes: args, + resultType: resultType, + isMemberFunction: memberFunction, + } + var err error + for _, opt := range opts { + overload, err = opt(overload) + if err != nil { + return nil, err + } + } + return overload, nil +} + +// OverloadDecl contains the definition of a single overload id with a specific signature, and an optional +// implementation. +type OverloadDecl struct { + id string + argTypes []*types.Type + resultType *types.Type + isMemberFunction bool + // nonStrict indicates that the function will accept error and unknown arguments as inputs. + nonStrict bool + // operandTrait indicates whether the member argument should have a specific type-trait. + // + // This is useful for creating overloads which operate on a type-interface rather than a concrete type. + operandTrait int + + // Function implementation options. Optional, but encouraged. + // unaryOp is a function binding that takes a single argument. + unaryOp functions.UnaryOp + // binaryOp is a function binding that takes two arguments. 
+	binaryOp functions.BinaryOp
+	// functionOp is a catch-all for zero-arity and three-plus arity functions.
+	functionOp functions.FunctionOp
+}
+
+// ID mirrors the overload signature and provides a unique id which may be referenced within the type-checker
+// and interpreter to optimize performance.
+//
+// The ID format is usually one of two styles:
+// global: <functionName>_<argType>_<argTypeN>
+// member: <memberType>_<functionName>_<argType>_<argTypeN>
+func (o *OverloadDecl) ID() string {
+	if o == nil {
+		return ""
+	}
+	return o.id
+}
+
+// ArgTypes contains the set of argument types expected by the overload.
+//
+// For member functions ArgTypes[0] represents the member operand type.
+func (o *OverloadDecl) ArgTypes() []*types.Type {
+	if o == nil {
+		return emptyArgs
+	}
+	return o.argTypes
+}
+
+// IsMemberFunction indicates whether the overload is a member function
+func (o *OverloadDecl) IsMemberFunction() bool {
+	if o == nil {
+		return false
+	}
+	return o.isMemberFunction
+}
+
+// IsNonStrict returns whether the overload accepts errors and unknown values as arguments.
+func (o *OverloadDecl) IsNonStrict() bool {
+	if o == nil {
+		return false
+	}
+	return o.nonStrict
+}
+
+// OperandTrait returns the trait mask of the first operand to the overload call, e.g.
+// `traits.Indexer`
+func (o *OverloadDecl) OperandTrait() int {
+	if o == nil {
+		return 0
+	}
+	return o.operandTrait
+}
+
+// ResultType indicates the output type from calling the function.
+func (o *OverloadDecl) ResultType() *types.Type {
+	if o == nil {
+		// *types.Type is nil-safe
+		return nil
+	}
+	return o.resultType
+}
+
+// TypeParams returns the type parameter names associated with the overload.
+func (o *OverloadDecl) TypeParams() []string {
+	typeParams := map[string]struct{}{}
+	collectParamNames(typeParams, o.ResultType())
+	for _, arg := range o.ArgTypes() {
+		collectParamNames(typeParams, arg)
+	}
+	params := make([]string, 0, len(typeParams))
+	for param := range typeParams {
+		params = append(params, param)
+	}
+	return params
+}
+
+// SignatureEquals determines whether the incoming overload declaration signature is equal to the current signature.
+//
+// Operand trait and strict-ness are not considered as part of signature equality.
+func (o *OverloadDecl) SignatureEquals(other *OverloadDecl) bool {
+	if o == other {
+		return true
+	}
+	if o.ID() != other.ID() || o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+		return false
+	}
+	for i, at := range o.ArgTypes() {
+		oat := other.ArgTypes()[i]
+		if !at.IsEquivalentType(oat) {
+			return false
+		}
+	}
+	return o.ResultType().IsEquivalentType(other.ResultType())
+}
+
+// SignatureOverlaps indicates whether two functions have non-equal, but overlapping function signatures.
+//
+// For example, list(dyn) collides with list(string) since the 'dyn' type can contain a 'string' type.
+func (o *OverloadDecl) SignatureOverlaps(other *OverloadDecl) bool {
+	if o.IsMemberFunction() != other.IsMemberFunction() || len(o.ArgTypes()) != len(other.ArgTypes()) {
+		return false
+	}
+	argsOverlap := true
+	for i, argType := range o.ArgTypes() {
+		otherArgType := other.ArgTypes()[i]
+		argsOverlap = argsOverlap &&
+			(argType.IsAssignableType(otherArgType) ||
+				otherArgType.IsAssignableType(argType))
+	}
+	return argsOverlap
+}
+
+// hasBinding indicates whether the overload already has a definition.
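+// It is nil-safe and reports true when a unary, binary, or variadic
+// implementation has been configured.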
+func (o *OverloadDecl) hasBinding() bool {
+	return o != nil && (o.unaryOp != nil || o.binaryOp != nil || o.functionOp != nil)
+}
+
+// guardedUnaryOp creates an invocation guard around the provided unary operator, if one is defined.
+func (o *OverloadDecl) guardedUnaryOp(funcName string, disableTypeGuards bool) functions.UnaryOp {
+	if o.unaryOp == nil {
+		return nil
+	}
+	return func(arg ref.Val) ref.Val {
+		if !o.matchesRuntimeUnarySignature(disableTypeGuards, arg) {
+			return MaybeNoSuchOverload(funcName, arg)
+		}
+		return o.unaryOp(arg)
+	}
+}
+
+// guardedBinaryOp creates an invocation guard around the provided binary operator, if one is defined.
+func (o *OverloadDecl) guardedBinaryOp(funcName string, disableTypeGuards bool) functions.BinaryOp {
+	if o.binaryOp == nil {
+		return nil
+	}
+	return func(arg1, arg2 ref.Val) ref.Val {
+		if !o.matchesRuntimeBinarySignature(disableTypeGuards, arg1, arg2) {
+			return MaybeNoSuchOverload(funcName, arg1, arg2)
+		}
+		return o.binaryOp(arg1, arg2)
+	}
+}
+
+// guardedFunctionOp creates an invocation guard around the provided variadic function binding, if one is provided.
+func (o *OverloadDecl) guardedFunctionOp(funcName string, disableTypeGuards bool) functions.FunctionOp {
+	if o.functionOp == nil {
+		return nil
+	}
+	return func(args ...ref.Val) ref.Val {
+		if !o.matchesRuntimeSignature(disableTypeGuards, args...) {
+			return MaybeNoSuchOverload(funcName, args...)
+		}
+		return o.functionOp(args...)
+	}
+}
+
+// matchesRuntimeUnarySignature indicates whether the argument type is runtime assignable to the overload's expected argument.
+func (o *OverloadDecl) matchesRuntimeUnarySignature(disableTypeGuards bool, arg ref.Val) bool {
+	return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg) &&
+		matchOperandTrait(o.OperandTrait(), arg)
+}
+
+// matchesRuntimeBinarySignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeBinarySignature(disableTypeGuards bool, arg1, arg2 ref.Val) bool {
+	return matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[0], arg1) &&
+		matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[1], arg2) &&
+		matchOperandTrait(o.OperandTrait(), arg1)
+}
+
+// matchesRuntimeSignature indicates whether the argument types are runtime assignable to the overload's expected arguments.
+func (o *OverloadDecl) matchesRuntimeSignature(disableTypeGuards bool, args ...ref.Val) bool {
+	if len(args) != len(o.ArgTypes()) {
+		return false
+	}
+	if len(args) == 0 {
+		return true
+	}
+	for i, arg := range args {
+		if !matchRuntimeArgType(o.IsNonStrict(), disableTypeGuards, o.ArgTypes()[i], arg) {
+			return false
+		}
+	}
+	return matchOperandTrait(o.OperandTrait(), args[0])
+}
+
+func matchRuntimeArgType(nonStrict, disableTypeGuards bool, argType *types.Type, arg ref.Val) bool {
+	if nonStrict && (disableTypeGuards || types.IsUnknownOrError(arg)) {
+		return true
+	}
+	if types.IsUnknownOrError(arg) {
+		return false
+	}
+	return disableTypeGuards || argType.IsAssignableRuntimeType(arg)
+}
+
+func matchOperandTrait(trait int, arg ref.Val) bool {
+	return trait == 0 || arg.Type().HasTrait(trait) || types.IsUnknownOrError(arg)
+}
+
+// OverloadOpt is a functional option for configuring a function overload.
+type OverloadOpt func(*OverloadDecl) (*OverloadDecl, error)
+
+// UnaryBinding provides the implementation of a unary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func UnaryBinding(binding functions.UnaryOp) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		if o.hasBinding() {
+			return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+		}
+		if len(o.ArgTypes()) != 1 {
+			return nil, fmt.Errorf("unary function bound to non-unary overload: %s", o.ID())
+		}
+		o.unaryOp = binding
+		return o, nil
+	}
+}
+
+// BinaryBinding provides the implementation of a binary overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func BinaryBinding(binding functions.BinaryOp) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		if o.hasBinding() {
+			return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+		}
+		if len(o.ArgTypes()) != 2 {
+			return nil, fmt.Errorf("binary function bound to non-binary overload: %s", o.ID())
+		}
+		o.binaryOp = binding
+		return o, nil
+	}
+}
+
+// FunctionBinding provides the implementation of a variadic overload. The provided function is protected by a runtime
+// type-guard which ensures runtime type agreement between the overload signature and runtime argument types.
+func FunctionBinding(binding functions.FunctionOp) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		if o.hasBinding() {
+			return nil, fmt.Errorf("overload already has a binding: %s", o.ID())
+		}
+		o.functionOp = binding
+		return o, nil
+	}
+}
+
+// OverloadIsNonStrict enables the function to be called with error and unknown argument values.
+//
+// Note: do not use this option unless absolutely necessary as it should be an uncommon feature.
+func OverloadIsNonStrict() OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		o.nonStrict = true
+		return o, nil
+	}
+}
+
+// OverloadOperandTrait configures a set of traits which the first argument to the overload must implement in order to be
+// successfully invoked.
+func OverloadOperandTrait(trait int) OverloadOpt {
+	return func(o *OverloadDecl) (*OverloadDecl, error) {
+		o.operandTrait = trait
+		return o, nil
+	}
+}
+
+// NewConstant creates a new constant declaration.
+func NewConstant(name string, t *types.Type, v ref.Val) *VariableDecl {
+	return &VariableDecl{name: name, varType: t, value: v}
+}
+
+// NewVariable creates a new variable declaration.
+func NewVariable(name string, t *types.Type) *VariableDecl {
+	return &VariableDecl{name: name, varType: t}
+}
+
+// VariableDecl defines a variable declaration which may optionally have a constant value.
+type VariableDecl struct {
+	name    string
+	varType *types.Type
+	value   ref.Val
+}
+
+// Name returns the fully-qualified variable name
+func (v *VariableDecl) Name() string {
+	if v == nil {
+		return ""
+	}
+	return v.name
+}
+
+// Type returns the types.Type value associated with the variable.
+func (v *VariableDecl) Type() *types.Type {
+	if v == nil {
+		// types.Type is nil-safe
+		return nil
+	}
+	return v.varType
+}
+
+// Value returns the constant value associated with the declaration.
+func (v *VariableDecl) Value() ref.Val {
+	if v == nil {
+		return nil
+	}
+	return v.value
+}
+
+// DeclarationIsEquivalent returns true if one variable declaration has the same name and same type as the input.
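+// Constant values are not considered; the comparison is by name and type only.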
+func (v *VariableDecl) DeclarationIsEquivalent(other *VariableDecl) bool { + if v == other { + return true + } + return v.Name() == other.Name() && v.Type().IsEquivalentType(other.Type()) +} + +// VariableDeclToExprDecl converts a go-native variable declaration into a protobuf-type variable declaration. +func VariableDeclToExprDecl(v *VariableDecl) (*exprpb.Decl, error) { + varType, err := types.TypeToExprType(v.Type()) + if err != nil { + return nil, err + } + return chkdecls.NewVar(v.Name(), varType), nil +} + +// TypeVariable creates a new type identifier for use within a types.Provider +func TypeVariable(t *types.Type) *VariableDecl { + return NewVariable(t.TypeName(), types.NewTypeTypeWithParam(t)) +} + +// FunctionDeclToExprDecl converts a go-native function declaration into a protobuf-typed function declaration. +func FunctionDeclToExprDecl(f *FunctionDecl) (*exprpb.Decl, error) { + overloads := make([]*exprpb.Decl_FunctionDecl_Overload, len(f.overloads)) + for i, oID := range f.overloadOrdinals { + o := f.overloads[oID] + paramNames := map[string]struct{}{} + argTypes := make([]*exprpb.Type, len(o.ArgTypes())) + for j, a := range o.ArgTypes() { + collectParamNames(paramNames, a) + at, err := types.TypeToExprType(a) + if err != nil { + return nil, err + } + argTypes[j] = at + } + collectParamNames(paramNames, o.ResultType()) + resultType, err := types.TypeToExprType(o.ResultType()) + if err != nil { + return nil, err + } + if len(paramNames) == 0 { + if o.IsMemberFunction() { + overloads[i] = chkdecls.NewInstanceOverload(oID, argTypes, resultType) + } else { + overloads[i] = chkdecls.NewOverload(oID, argTypes, resultType) + } + } else { + params := []string{} + for pn := range paramNames { + params = append(params, pn) + } + if o.IsMemberFunction() { + overloads[i] = chkdecls.NewParameterizedInstanceOverload(oID, argTypes, resultType, params) + } else { + overloads[i] = chkdecls.NewParameterizedOverload(oID, argTypes, resultType, params) + } + } + } + return chkdecls.NewFunction(f.Name(), overloads...), nil +} + +func collectParamNames(paramNames map[string]struct{}, arg *types.Type) { + if arg.Kind() == types.TypeParamKind { + paramNames[arg.TypeName()] = struct{}{} + } + for _, param := range arg.Parameters() { + collectParamNames(paramNames, param) + } +} + +var ( + emptyArgs = []*types.Type{} +) diff --git a/vendor/github.com/google/cel-go/common/doc.go b/vendor/github.com/google/cel-go/common/doc.go new file mode 100644 index 00000000..5362fdfe --- /dev/null +++ b/vendor/github.com/google/cel-go/common/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package common defines types and utilities common to expression parsing, +// checking, and interpretation +package common diff --git a/vendor/github.com/google/cel-go/common/error.go b/vendor/github.com/google/cel-go/common/error.go new file mode 100644 index 00000000..0cf21345 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/error.go @@ -0,0 +1,74 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// NewError creates an error associated with an expression id with the given message at the given location. +func NewError(id int64, message string, location Location) *Error { + return &Error{Message: message, Location: location, ExprID: id} +} + +// Error type which references an expression id, a location within source, and a message. +type Error struct { + Location Location + Message string + ExprID int64 +} + +const ( + dot = "." + ind = "^" + wideDot = "\uff0e" + wideInd = "\uff3e" + + // maxSnippetLength is the largest number of characters which can be rendered in an error message snippet. + maxSnippetLength = 16384 +) + +// ToDisplayString decorates the error message with the source location. +func (e *Error) ToDisplayString(source Source) string { + var result = fmt.Sprintf("ERROR: %s:%d:%d: %s", + source.Description(), + e.Location.Line(), + e.Location.Column()+1, // add one to the 0-based column for display + e.Message) + if snippet, found := source.Snippet(e.Location.Line()); found && len(snippet) <= maxSnippetLength { + snippet := strings.Replace(snippet, "\t", " ", -1) + srcLine := "\n | " + snippet + var bytes = []byte(snippet) + var indLine = "\n | " + for i := 0; i < e.Location.Column() && len(bytes) > 0; i++ { + _, sz := utf8.DecodeRune(bytes) + bytes = bytes[sz:] + if sz > 1 { + indLine += wideDot + } else { + indLine += dot + } + } + if _, sz := utf8.DecodeRune(bytes); sz > 1 { + indLine += wideInd + } else { + indLine += ind + } + result += srcLine + indLine + } + return result +} diff --git a/vendor/github.com/google/cel-go/common/errors.go b/vendor/github.com/google/cel-go/common/errors.go new file mode 100644 index 00000000..25adc73d --- /dev/null +++ b/vendor/github.com/google/cel-go/common/errors.go @@ -0,0 +1,103 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package common + +import ( + "fmt" + "sort" + "strings" +) + +// Errors type which contains a list of errors observed during parsing. 
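+// At most maxErrorsToReport errors are retained; subsequent reports are
+// counted but dropped, and ToDisplayString notes how many were truncated.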
+type Errors struct { + errors []*Error + source Source + numErrors int + maxErrorsToReport int +} + +// NewErrors creates a new instance of the Errors type. +func NewErrors(source Source) *Errors { + return &Errors{ + errors: []*Error{}, + source: source, + maxErrorsToReport: 100, + } +} + +// ReportError records an error at a source location. +func (e *Errors) ReportError(l Location, format string, args ...any) { + e.ReportErrorAtID(0, l, format, args...) +} + +// ReportErrorAtID records an error at a source location and expression id. +func (e *Errors) ReportErrorAtID(id int64, l Location, format string, args ...any) { + e.numErrors++ + if e.numErrors > e.maxErrorsToReport { + return + } + err := &Error{ + ExprID: id, + Location: l, + Message: fmt.Sprintf(format, args...), + } + e.errors = append(e.errors, err) +} + +// GetErrors returns the list of observed errors. +func (e *Errors) GetErrors() []*Error { + return e.errors[:] +} + +// Append creates a new Errors object with the current and input errors. +func (e *Errors) Append(errs []*Error) *Errors { + return &Errors{ + errors: append(e.errors[:], errs...), + source: e.source, + numErrors: e.numErrors + len(errs), + maxErrorsToReport: e.maxErrorsToReport, + } +} + +// ToDisplayString returns the error set to a newline delimited string. +func (e *Errors) ToDisplayString() string { + errorsInString := e.maxErrorsToReport + if e.numErrors > e.maxErrorsToReport { + // add one more error to indicate the number of errors truncated. + errorsInString++ + } else { + // otherwise the error set will just contain the number of errors. + errorsInString = e.numErrors + } + + result := make([]string, errorsInString) + sort.SliceStable(e.errors, func(i, j int) bool { + ei := e.errors[i].Location + ej := e.errors[j].Location + return ei.Line() < ej.Line() || + (ei.Line() == ej.Line() && ei.Column() < ej.Column()) + }) + for i, err := range e.errors { + // This can happen during the append of two errors objects + if i >= e.maxErrorsToReport { + break + } + result[i] = err.ToDisplayString(e.source) + } + if e.numErrors > e.maxErrorsToReport { + result[e.maxErrorsToReport] = fmt.Sprintf("%d more errors were truncated", e.numErrors-e.maxErrorsToReport) + } + return strings.Join(result, "\n") +} diff --git a/vendor/github.com/google/cel-go/common/functions/BUILD.bazel b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel new file mode 100644 index 00000000..3cc27d60 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/functions/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "functions.go", + ], + importpath = "github.com/google/cel-go/common/functions", + deps = [ + "//common/types/ref:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/functions/functions.go b/vendor/github.com/google/cel-go/common/functions/functions.go new file mode 100644 index 00000000..67f4a594 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/functions/functions.go @@ -0,0 +1,61 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package functions defines the standard builtin functions supported by the interpreter
+package functions
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Overload defines a named overload of a function, indicating an operand trait
+// which must be present on the first argument to the overload as well as one
+// of either a unary, binary, or function implementation.
+//
+// The majority of operators within the expression language are unary or binary
+// and the specializations simplify the call contract for implementers of
+// types with operator overloads. Any added complexity is assumed to be handled
+// by the generic FunctionOp.
+type Overload struct {
+	// Operator name as written in an expression or defined within
+	// operators.go.
+	Operator string
+
+	// Operand trait used to dispatch the call. The zero-value indicates a
+	// global function overload or that one of the Unary / Binary / Function
+	// definitions should be used to execute the call.
+	OperandTrait int
+
+	// Unary defines the overload with a UnaryOp implementation. May be nil.
+	Unary UnaryOp
+
+	// Binary defines the overload with a BinaryOp implementation. May be nil.
+	Binary BinaryOp
+
+	// Function defines the overload with a FunctionOp implementation. May be
+	// nil.
+	Function FunctionOp
+
+	// NonStrict specifies whether the Overload will tolerate arguments that
+	// are types.Err or types.Unknown.
+	NonStrict bool
+}
+
+// UnaryOp is a function that takes a single value and produces an output.
+type UnaryOp func(value ref.Val) ref.Val
+
+// BinaryOp is a function that takes two values and produces an output.
+type BinaryOp func(lhs ref.Val, rhs ref.Val) ref.Val
+
+// FunctionOp is a function that accepts zero or more arguments and produces
+// a value or error as a result.
+type FunctionOp func(values ...ref.Val) ref.Val
diff --git a/vendor/github.com/google/cel-go/common/location.go b/vendor/github.com/google/cel-go/common/location.go
new file mode 100644
index 00000000..ec3fa7cb
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/location.go
@@ -0,0 +1,51 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common

+// Location interface to represent a location within Source.
+type Location interface {
+	Line() int   // 1-based line number within source.
+	Column() int // 0-based column number within source.
+}
+
+// SourceLocation helper type to manually construct a location.
+type SourceLocation struct {
+	line   int
+	column int
+}
+
+var (
+	// SourceLocation implements the Location interface.
+	_ Location = &SourceLocation{}
+	// NoLocation is a particular illegal location.
+	NoLocation = &SourceLocation{-1, -1}
+)
+
+// NewLocation creates a new location.
+func NewLocation(line, column int) Location {
+	return &SourceLocation{
+		line:   line,
+		column: column}
+}
+
+// Line returns the 1-based line of the location.
+func (l *SourceLocation) Line() int {
+	return l.line
+}
+
+// Column returns the 0-based column number of the location.
+func (l *SourceLocation) Column() int {
+	return l.column
+}
diff --git a/vendor/github.com/google/cel-go/common/operators/BUILD.bazel b/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
new file mode 100644
index 00000000..b5b67f06
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/operators/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+    default_visibility = ["//visibility:public"],
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "operators.go",
+    ],
+    importpath = "github.com/google/cel-go/common/operators",
+)
diff --git a/vendor/github.com/google/cel-go/common/operators/operators.go b/vendor/github.com/google/cel-go/common/operators/operators.go
new file mode 100644
index 00000000..f9b39bda
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/operators/operators.go
@@ -0,0 +1,157 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package operators defines the internal function names of operators.
+//
+// All operators in the expression language are modelled as function calls.
+package operators
+
+// String "names" for CEL operators.
+const (
+	// Symbolic operators.
+	Conditional   = "_?_:_"
+	LogicalAnd    = "_&&_"
+	LogicalOr     = "_||_"
+	LogicalNot    = "!_"
+	Equals        = "_==_"
+	NotEquals     = "_!=_"
+	Less          = "_<_"
+	LessEquals    = "_<=_"
+	Greater       = "_>_"
+	GreaterEquals = "_>=_"
+	Add           = "_+_"
+	Subtract      = "_-_"
+	Multiply      = "_*_"
+	Divide        = "_/_"
+	Modulo        = "_%_"
+	Negate        = "-_"
+	Index         = "_[_]"
+	OptIndex      = "_[?_]"
+	OptSelect     = "_?._"
+
+	// Macros, must have a valid identifier.
+	Has       = "has"
+	All       = "all"
+	Exists    = "exists"
+	ExistsOne = "exists_one"
+	Map       = "map"
+	Filter    = "filter"
+
+	// Named operators, must not be valid identifiers.
+	NotStrictlyFalse = "@not_strictly_false"
+	In               = "@in"
+
+	// Deprecated: named operators with valid identifiers.
+	OldNotStrictlyFalse = "__not_strictly_false__"
+	OldIn               = "_in_"
+)
+
+var (
+	operators = map[string]string{
+		"+":  Add,
+		"/":  Divide,
+		"==": Equals,
+		">":  Greater,
+		">=": GreaterEquals,
+		"in": In,
+		"<":  Less,
+		"<=": LessEquals,
+		"%":  Modulo,
+		"*":  Multiply,
+		"!=": NotEquals,
+		"-":  Subtract,
+	}
+	// operatorMap maps each operator symbol to a struct containing its display name,
+	// if applicable, along with the operator precedence and arity.
+	//
+	// If the symbol does not have a display name listed in the map, it is only because it requires
+	// special casing to render properly as text.
+	operatorMap = map[string]struct {
+		displayName string
+		precedence  int
+		arity       int
+	}{
+		Conditional:   {displayName: "", precedence: 8, arity: 3},
+		LogicalOr:     {displayName: "||", precedence: 7, arity: 2},
+		LogicalAnd:    {displayName: "&&", precedence: 6, arity: 2},
+		Equals:        {displayName: "==", precedence: 5, arity: 2},
+		Greater:       {displayName: ">", precedence: 5, arity: 2},
+		GreaterEquals: {displayName: ">=", precedence: 5, arity: 2},
+		In:            {displayName: "in", precedence: 5, arity: 2},
+		Less:          {displayName: "<", precedence: 5, arity: 2},
+		LessEquals:    {displayName: "<=", precedence: 5, arity: 2},
+		NotEquals:     {displayName: "!=", precedence: 5, arity: 2},
+		OldIn:         {displayName: "in", precedence: 5, arity: 2},
+		Add:           {displayName: "+", precedence: 4, arity: 2},
+		Subtract:      {displayName: "-", precedence: 4, arity: 2},
+		Divide:        {displayName: "/", precedence: 3, arity: 2},
+		Modulo:        {displayName: "%", precedence: 3, arity: 2},
+		Multiply:      {displayName: "*", precedence: 3, arity: 2},
+		LogicalNot:    {displayName: "!", precedence: 2, arity: 1},
+		Negate:        {displayName: "-", precedence: 2, arity: 1},
+		Index:         {displayName: "", precedence: 1, arity: 2},
+		OptIndex:      {displayName: "", precedence: 1, arity: 2},
+		OptSelect:     {displayName: "", precedence: 1, arity: 2},
+	}
+)
+
+// Find the internal function name for an operator, if the input text is one.
+func Find(text string) (string, bool) {
+	op, found := operators[text]
+	return op, found
+}
+
+// FindReverse returns the unmangled, text representation of the operator.
+func FindReverse(symbol string) (string, bool) {
+	op, found := operatorMap[symbol]
+	if !found {
+		return "", false
+	}
+	return op.displayName, true
+}
+
+// FindReverseBinaryOperator returns the unmangled, text representation of a binary operator.
+//
+// If the symbol does refer to an operator, but the operator does not have a display name the
+// result is false.
+func FindReverseBinaryOperator(symbol string) (string, bool) {
+	op, found := operatorMap[symbol]
+	if !found || op.arity != 2 {
+		return "", false
+	}
+	if op.displayName == "" {
+		return "", false
+	}
+	return op.displayName, true
+}
+
+// Precedence returns the operator precedence, where a higher number indicates a
+// higher-precedence operation.
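+// For example, per the operatorMap table above, Precedence(LogicalOr) yields 7
+// while Precedence(Multiply) yields 3; unknown symbols yield 0.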
+func Precedence(symbol string) int {
+	op, found := operatorMap[symbol]
+	if !found {
+		return 0
+	}
+	return op.precedence
+}
+
+// Arity returns the number of arguments the operator takes.
+// -1 is returned if an undefined symbol is provided.
+func Arity(symbol string) int {
+	op, found := operatorMap[symbol]
+	if !found {
+		return -1
+	}
+	return op.arity
+}
diff --git a/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel b/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
new file mode 100644
index 00000000..e46e2f48
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/overloads/BUILD.bazel
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(
+    default_visibility = ["//visibility:public"],
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "overloads.go",
+    ],
+    importpath = "github.com/google/cel-go/common/overloads",
+)
diff --git a/vendor/github.com/google/cel-go/common/overloads/overloads.go b/vendor/github.com/google/cel-go/common/overloads/overloads.go
new file mode 100644
index 00000000..9d50f436
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/overloads/overloads.go
@@ -0,0 +1,327 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package overloads defines the internal overload identifiers for function and
+// operator overloads.
+package overloads + +// Boolean logic overloads +const ( + Conditional = "conditional" + LogicalAnd = "logical_and" + LogicalOr = "logical_or" + LogicalNot = "logical_not" + NotStrictlyFalse = "not_strictly_false" + Equals = "equals" + NotEquals = "not_equals" + LessBool = "less_bool" + LessInt64 = "less_int64" + LessInt64Double = "less_int64_double" + LessInt64Uint64 = "less_int64_uint64" + LessUint64 = "less_uint64" + LessUint64Double = "less_uint64_double" + LessUint64Int64 = "less_uint64_int64" + LessDouble = "less_double" + LessDoubleInt64 = "less_double_int64" + LessDoubleUint64 = "less_double_uint64" + LessString = "less_string" + LessBytes = "less_bytes" + LessTimestamp = "less_timestamp" + LessDuration = "less_duration" + LessEqualsBool = "less_equals_bool" + LessEqualsInt64 = "less_equals_int64" + LessEqualsInt64Double = "less_equals_int64_double" + LessEqualsInt64Uint64 = "less_equals_int64_uint64" + LessEqualsUint64 = "less_equals_uint64" + LessEqualsUint64Double = "less_equals_uint64_double" + LessEqualsUint64Int64 = "less_equals_uint64_int64" + LessEqualsDouble = "less_equals_double" + LessEqualsDoubleInt64 = "less_equals_double_int64" + LessEqualsDoubleUint64 = "less_equals_double_uint64" + LessEqualsString = "less_equals_string" + LessEqualsBytes = "less_equals_bytes" + LessEqualsTimestamp = "less_equals_timestamp" + LessEqualsDuration = "less_equals_duration" + GreaterBool = "greater_bool" + GreaterInt64 = "greater_int64" + GreaterInt64Double = "greater_int64_double" + GreaterInt64Uint64 = "greater_int64_uint64" + GreaterUint64 = "greater_uint64" + GreaterUint64Double = "greater_uint64_double" + GreaterUint64Int64 = "greater_uint64_int64" + GreaterDouble = "greater_double" + GreaterDoubleInt64 = "greater_double_int64" + GreaterDoubleUint64 = "greater_double_uint64" + GreaterString = "greater_string" + GreaterBytes = "greater_bytes" + GreaterTimestamp = "greater_timestamp" + GreaterDuration = "greater_duration" + GreaterEqualsBool = "greater_equals_bool" + GreaterEqualsInt64 = "greater_equals_int64" + GreaterEqualsInt64Double = "greater_equals_int64_double" + GreaterEqualsInt64Uint64 = "greater_equals_int64_uint64" + GreaterEqualsUint64 = "greater_equals_uint64" + GreaterEqualsUint64Double = "greater_equals_uint64_double" + GreaterEqualsUint64Int64 = "greater_equals_uint64_int64" + GreaterEqualsDouble = "greater_equals_double" + GreaterEqualsDoubleInt64 = "greater_equals_double_int64" + GreaterEqualsDoubleUint64 = "greater_equals_double_uint64" + GreaterEqualsString = "greater_equals_string" + GreaterEqualsBytes = "greater_equals_bytes" + GreaterEqualsTimestamp = "greater_equals_timestamp" + GreaterEqualsDuration = "greater_equals_duration" +) + +// Math overloads +const ( + AddInt64 = "add_int64" + AddUint64 = "add_uint64" + AddDouble = "add_double" + AddString = "add_string" + AddBytes = "add_bytes" + AddList = "add_list" + AddTimestampDuration = "add_timestamp_duration" + AddDurationTimestamp = "add_duration_timestamp" + AddDurationDuration = "add_duration_duration" + SubtractInt64 = "subtract_int64" + SubtractUint64 = "subtract_uint64" + SubtractDouble = "subtract_double" + SubtractTimestampTimestamp = "subtract_timestamp_timestamp" + SubtractTimestampDuration = "subtract_timestamp_duration" + SubtractDurationDuration = "subtract_duration_duration" + MultiplyInt64 = "multiply_int64" + MultiplyUint64 = "multiply_uint64" + MultiplyDouble = "multiply_double" + DivideInt64 = "divide_int64" + DivideUint64 = "divide_uint64" + DivideDouble = "divide_double" + ModuloInt64 = 
"modulo_int64" + ModuloUint64 = "modulo_uint64" + NegateInt64 = "negate_int64" + NegateDouble = "negate_double" +) + +// Index overloads +const ( + IndexList = "index_list" + IndexMap = "index_map" + IndexMessage = "index_message" // TODO: introduce concept of types.Message +) + +// In operators +const ( + DeprecatedIn = "in" + InList = "in_list" + InMap = "in_map" + InMessage = "in_message" // TODO: introduce concept of types.Message +) + +// Size overloads +const ( + Size = "size" + SizeString = "size_string" + SizeBytes = "size_bytes" + SizeList = "size_list" + SizeMap = "size_map" + SizeStringInst = "string_size" + SizeBytesInst = "bytes_size" + SizeListInst = "list_size" + SizeMapInst = "map_size" +) + +// String function names. +const ( + Contains = "contains" + EndsWith = "endsWith" + Matches = "matches" + StartsWith = "startsWith" +) + +// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations. +const ( + ExtQuoteString = "strings_quote" +) + +// String function overload names. +const ( + ContainsString = "contains_string" + EndsWithString = "ends_with_string" + MatchesString = "matches_string" + StartsWithString = "starts_with_string" +) + +// Extension function overloads with complex behaviors that need to be referenced in runtime and static analysis cost computations. +const ( + ExtFormatString = "string_format" +) + +// Time-based functions. +const ( + TimeGetFullYear = "getFullYear" + TimeGetMonth = "getMonth" + TimeGetDayOfYear = "getDayOfYear" + TimeGetDate = "getDate" + TimeGetDayOfMonth = "getDayOfMonth" + TimeGetDayOfWeek = "getDayOfWeek" + TimeGetHours = "getHours" + TimeGetMinutes = "getMinutes" + TimeGetSeconds = "getSeconds" + TimeGetMilliseconds = "getMilliseconds" +) + +// Timestamp overloads for time functions without timezones. +const ( + TimestampToYear = "timestamp_to_year" + TimestampToMonth = "timestamp_to_month" + TimestampToDayOfYear = "timestamp_to_day_of_year" + TimestampToDayOfMonthZeroBased = "timestamp_to_day_of_month" + TimestampToDayOfMonthOneBased = "timestamp_to_day_of_month_1_based" + TimestampToDayOfWeek = "timestamp_to_day_of_week" + TimestampToHours = "timestamp_to_hours" + TimestampToMinutes = "timestamp_to_minutes" + TimestampToSeconds = "timestamp_to_seconds" + TimestampToMilliseconds = "timestamp_to_milliseconds" +) + +// Timestamp overloads for time functions with timezones. +const ( + TimestampToYearWithTz = "timestamp_to_year_with_tz" + TimestampToMonthWithTz = "timestamp_to_month_with_tz" + TimestampToDayOfYearWithTz = "timestamp_to_day_of_year_with_tz" + TimestampToDayOfMonthZeroBasedWithTz = "timestamp_to_day_of_month_with_tz" + TimestampToDayOfMonthOneBasedWithTz = "timestamp_to_day_of_month_1_based_with_tz" + TimestampToDayOfWeekWithTz = "timestamp_to_day_of_week_with_tz" + TimestampToHoursWithTz = "timestamp_to_hours_with_tz" + TimestampToMinutesWithTz = "timestamp_to_minutes_with_tz" + TimestampToSecondsWithTz = "timestamp_to_seconds_tz" + TimestampToMillisecondsWithTz = "timestamp_to_milliseconds_with_tz" +) + +// Duration overloads for time functions. 
+const ( + DurationToHours = "duration_to_hours" + DurationToMinutes = "duration_to_minutes" + DurationToSeconds = "duration_to_seconds" + DurationToMilliseconds = "duration_to_milliseconds" +) + +// Type conversion methods and overloads +const ( + TypeConvertInt = "int" + TypeConvertUint = "uint" + TypeConvertDouble = "double" + TypeConvertBool = "bool" + TypeConvertString = "string" + TypeConvertBytes = "bytes" + TypeConvertTimestamp = "timestamp" + TypeConvertDuration = "duration" + TypeConvertType = "type" + TypeConvertDyn = "dyn" +) + +// Int conversion functions. +const ( + IntToInt = "int64_to_int64" + UintToInt = "uint64_to_int64" + DoubleToInt = "double_to_int64" + StringToInt = "string_to_int64" + TimestampToInt = "timestamp_to_int64" + DurationToInt = "duration_to_int64" +) + +// Uint conversion functions. +const ( + UintToUint = "uint64_to_uint64" + IntToUint = "int64_to_uint64" + DoubleToUint = "double_to_uint64" + StringToUint = "string_to_uint64" +) + +// Double conversion functions. +const ( + DoubleToDouble = "double_to_double" + IntToDouble = "int64_to_double" + UintToDouble = "uint64_to_double" + StringToDouble = "string_to_double" +) + +// Bool conversion functions. +const ( + BoolToBool = "bool_to_bool" + StringToBool = "string_to_bool" +) + +// Bytes conversion functions. +const ( + BytesToBytes = "bytes_to_bytes" + StringToBytes = "string_to_bytes" +) + +// String conversion functions. +const ( + StringToString = "string_to_string" + BoolToString = "bool_to_string" + IntToString = "int64_to_string" + UintToString = "uint64_to_string" + DoubleToString = "double_to_string" + BytesToString = "bytes_to_string" + TimestampToString = "timestamp_to_string" + DurationToString = "duration_to_string" +) + +// Timestamp conversion functions +const ( + TimestampToTimestamp = "timestamp_to_timestamp" + StringToTimestamp = "string_to_timestamp" + IntToTimestamp = "int64_to_timestamp" +) + +// Convert duration from string +const ( + DurationToDuration = "duration_to_duration" + StringToDuration = "string_to_duration" + IntToDuration = "int64_to_duration" +) + +// Convert to dyn +const ( + ToDyn = "to_dyn" +) + +// Comprehensions helper methods, not directly accessible via a developer. +const ( + Iterator = "@iterator" + HasNext = "@hasNext" + Next = "@next" +) + +// IsTypeConversionFunction returns whether the input function is a standard library type +// conversion function. 
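+// For example, IsTypeConversionFunction("int") returns true, while
+// IsTypeConversionFunction("contains") returns false.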
+func IsTypeConversionFunction(function string) bool { + switch function { + case TypeConvertBool, + TypeConvertBytes, + TypeConvertDouble, + TypeConvertDuration, + TypeConvertDyn, + TypeConvertInt, + TypeConvertString, + TypeConvertTimestamp, + TypeConvertType, + TypeConvertUint: + return true + default: + return false + } +} diff --git a/vendor/github.com/google/cel-go/common/runes/BUILD.bazel b/vendor/github.com/google/cel-go/common/runes/BUILD.bazel new file mode 100644 index 00000000..bb30242c --- /dev/null +++ b/vendor/github.com/google/cel-go/common/runes/BUILD.bazel @@ -0,0 +1,25 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "buffer.go", + ], + importpath = "github.com/google/cel-go/common/runes", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "buffer_test.go", + ], + embed = [ + ":go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/runes/buffer.go b/vendor/github.com/google/cel-go/common/runes/buffer.go new file mode 100644 index 00000000..02119822 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/runes/buffer.go @@ -0,0 +1,242 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package runes provides interfaces and utilities for working with runes. +package runes + +import ( + "strings" + "unicode/utf8" +) + +// Buffer is an interface for accessing a contiguous array of code points. +type Buffer interface { + Get(i int) rune + Slice(i, j int) string + Len() int +} + +type emptyBuffer struct{} + +func (e *emptyBuffer) Get(i int) rune { + panic("slice index out of bounds") +} + +func (e *emptyBuffer) Slice(i, j int) string { + if i != 0 || i != j { + panic("slice index out of bounds") + } + return "" +} + +func (e *emptyBuffer) Len() int { + return 0 +} + +var _ Buffer = &emptyBuffer{} + +// asciiBuffer is an implementation for an array of code points that contain code points only from +// the ASCII character set. +type asciiBuffer struct { + arr []byte +} + +func (a *asciiBuffer) Get(i int) rune { + return rune(uint32(a.arr[i])) +} + +func (a *asciiBuffer) Slice(i, j int) string { + return string(a.arr[i:j]) +} + +func (a *asciiBuffer) Len() int { + return len(a.arr) +} + +var _ Buffer = &asciiBuffer{} + +// basicBuffer is an implementation for an array of code points that contain code points from both +// the Latin-1 character set and Basic Multilingual Plane. +type basicBuffer struct { + arr []uint16 +} + +func (b *basicBuffer) Get(i int) rune { + return rune(uint32(b.arr[i])) +} + +func (b *basicBuffer) Slice(i, j int) string { + var str strings.Builder + str.Grow((j - i) * 3) // Worst case encoding size for 0xffff is 3. 
+	for ; i < j; i++ {
+		str.WriteRune(rune(uint32(b.arr[i])))
+	}
+	return str.String()
+}
+
+func (b *basicBuffer) Len() int {
+	return len(b.arr)
+}
+
+var _ Buffer = &basicBuffer{}
+
+// supplementalBuffer is an implementation for an array of code points that contain code points from
+// the Latin-1 character set, Basic Multilingual Plane, or the Supplemental Multilingual Plane.
+type supplementalBuffer struct {
+	arr []rune
+}
+
+func (s *supplementalBuffer) Get(i int) rune {
+	return rune(uint32(s.arr[i]))
+}
+
+func (s *supplementalBuffer) Slice(i, j int) string {
+	return string(s.arr[i:j])
+}
+
+func (s *supplementalBuffer) Len() int {
+	return len(s.arr)
+}
+
+var _ Buffer = &supplementalBuffer{}
+
+var nilBuffer = &emptyBuffer{}
+
+// NewBuffer returns an efficient implementation of Buffer for the given text based on the ranges of
+// the encoded code points contained within.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If all remaining code
+// points stay within this range, the underlying storage is a uint16 array containing only Unicode
+// characters in the Basic Multilingual Plane. If we encounter a code point above '\uffff' we
+// allocate a rune array, copy the previous elements of the byte or uint16 array, and continue.
+// The underlying storage is then a rune array containing any Unicode character.
+func NewBuffer(data string) Buffer {
+	buf, _ := newBuffer(data, false)
+	return buf
+}
+
+// NewBufferAndLineOffsets returns an efficient implementation of Buffer for the given text based on
+// the ranges of the encoded code points contained within, as well as returning the line offsets.
+//
+// Code points are represented as an array of byte, uint16, or rune. This approach ensures that
+// each index represents a code point by itself without needing to use an array of rune. At first
+// we assume all code points are less than or equal to '\u007f'. If this holds true, the
+// underlying storage is a byte array containing only ASCII characters. If we encounter a code
+// point above this range but less than or equal to '\uffff' we allocate a uint16 array, copy the
+// elements of the previous byte array to the uint16 array, and continue. If all remaining code
+// points stay within this range, the underlying storage is a uint16 array containing only Unicode
+// characters in the Basic Multilingual Plane. If we encounter a code point above '\uffff' we
+// allocate a rune array, copy the previous elements of the byte or uint16 array, and continue.
+// The underlying storage is then a rune array containing any Unicode character.
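+//
+// For example (an editorial sketch; indices are 0-based code point offsets):
+//
+//	buf, offs := NewBufferAndLineOffsets("ab\ncd")
+//	buf.Get(2)      // '\n'
+//	buf.Slice(3, 5) // "cd"
+//	offs            // [3 6]: line 2 begins at offset 3; 6 is the final sentinel entry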
+func NewBufferAndLineOffsets(data string) (Buffer, []int32) {
+	return newBuffer(data, true)
+}
+
+func newBuffer(data string, lines bool) (Buffer, []int32) {
+	if len(data) == 0 {
+		return nilBuffer, []int32{0}
+	}
+	var (
+		idx        = 0
+		off  int32 = 0
+		buf8       = make([]byte, 0, len(data))
+		buf16 []uint16
+		buf32 []rune
+		offs  []int32
+	)
+	for idx < len(data) {
+		r, s := utf8.DecodeRuneInString(data[idx:])
+		idx += s
+		if lines && r == '\n' {
+			offs = append(offs, off+1)
+		}
+		if r < utf8.RuneSelf {
+			buf8 = append(buf8, byte(r))
+			off++
+			continue
+		}
+		if r <= 0xffff {
+			buf16 = make([]uint16, len(buf8), len(data))
+			for i, v := range buf8 {
+				buf16[i] = uint16(v)
+			}
+			buf8 = nil
+			buf16 = append(buf16, uint16(r))
+			off++
+			goto copy16
+		}
+		buf32 = make([]rune, len(buf8), len(data))
+		for i, v := range buf8 {
+			buf32[i] = rune(uint32(v))
+		}
+		buf8 = nil
+		buf32 = append(buf32, r)
+		off++
+		goto copy32
+	}
+	if lines {
+		offs = append(offs, off+1)
+	}
+	return &asciiBuffer{
+		arr: buf8,
+	}, offs
+copy16:
+	for idx < len(data) {
+		r, s := utf8.DecodeRuneInString(data[idx:])
+		idx += s
+		if lines && r == '\n' {
+			offs = append(offs, off+1)
+		}
+		if r <= 0xffff {
+			buf16 = append(buf16, uint16(r))
+			off++
+			continue
+		}
+		buf32 = make([]rune, len(buf16), len(data))
+		for i, v := range buf16 {
+			buf32[i] = rune(uint32(v))
+		}
+		buf16 = nil
+		buf32 = append(buf32, r)
+		off++
+		goto copy32
+	}
+	if lines {
+		offs = append(offs, off+1)
+	}
+	return &basicBuffer{
+		arr: buf16,
+	}, offs
+copy32:
+	for idx < len(data) {
+		r, s := utf8.DecodeRuneInString(data[idx:])
+		idx += s
+		if lines && r == '\n' {
+			offs = append(offs, off+1)
+		}
+		buf32 = append(buf32, r)
+		off++
+	}
+	if lines {
+		offs = append(offs, off+1)
+	}
+	return &supplementalBuffer{
+		arr: buf32,
+	}, offs
+}
diff --git a/vendor/github.com/google/cel-go/common/source.go b/vendor/github.com/google/cel-go/common/source.go
new file mode 100644
index 00000000..ec79cb54
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/source.go
@@ -0,0 +1,173 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+	"github.com/google/cel-go/common/runes"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Source interface for filter source contents.
+type Source interface {
+	// Content returns the source content represented as a string.
+	// Example contents are a single file's contents, a textbox field,
+	// or a URL parameter.
+	Content() string
+
+	// Description gives a brief description of the source.
+	// Example descriptions are a file name or UI element.
+	Description() string
+
+	// LineOffsets gives the character offsets at which lines occur.
+	// The zero-th entry should refer to the break between the first
+	// and second line, or EOF if there is only one line of source.
+	LineOffsets() []int32
+
+	// LocationOffset translates a Location to an offset.
+	// Given the line and column of the Location, it returns the
+	// Location's character offset in the Source, and a bool
+	// indicating whether the Location was found.
+	LocationOffset(location Location) (int32, bool)
+
+	// OffsetLocation translates a character offset to a Location, or
+	// reports false if the conversion was not feasible.
+	OffsetLocation(offset int32) (Location, bool)
+
+	// NewLocation takes an input line and column and produces a Location.
+	// The default behavior is to treat the line and column as absolute,
+	// but concrete derivations may use this method to convert a relative
+	// line and column position into an absolute location.
+	NewLocation(line, col int) Location
+
+	// Snippet returns a line of content and whether the line was found.
+	Snippet(line int) (string, bool)
+}
+
+// The sourceImpl type implements the Source interface.
+type sourceImpl struct {
+	runes.Buffer
+	description string
+	lineOffsets []int32
+}
+
+var _ runes.Buffer = &sourceImpl{}
+
+// TODO(jimlarson) "Character offsets" should index the code points
+// within the UTF-8 encoded string. It currently indexes bytes.
+// Can be accomplished by using []rune instead of string for contents.
+
+// NewTextSource creates a new Source from the input text string.
+func NewTextSource(text string) Source {
+	return NewStringSource(text, "<input>")
+}
+
+// NewStringSource creates a new Source from the given contents and description.
+func NewStringSource(contents string, description string) Source {
+	// Compute line offsets up front as they are referred to frequently.
+	buf, offs := runes.NewBufferAndLineOffsets(contents)
+	return &sourceImpl{
+		Buffer:      buf,
+		description: description,
+		lineOffsets: offs,
+	}
+}
+
+// NewInfoSource creates a new Source from a SourceInfo.
+func NewInfoSource(info *exprpb.SourceInfo) Source {
+	return &sourceImpl{
+		Buffer:      runes.NewBuffer(""),
+		description: info.GetLocation(),
+		lineOffsets: info.GetLineOffsets(),
+	}
+}
+
+// Content implements the Source interface method.
+func (s *sourceImpl) Content() string {
+	return s.Slice(0, s.Len())
+}
+
+// Description implements the Source interface method.
+func (s *sourceImpl) Description() string {
+	return s.description
+}
+
+// LineOffsets implements the Source interface method.
+func (s *sourceImpl) LineOffsets() []int32 {
+	return s.lineOffsets
+}
+
+// LocationOffset implements the Source interface method.
+func (s *sourceImpl) LocationOffset(location Location) (int32, bool) {
+	if lineOffset, found := s.findLineOffset(location.Line()); found {
+		return lineOffset + int32(location.Column()), true
+	}
+	return -1, false
+}
+
+// NewLocation implements the Source interface method.
+func (s *sourceImpl) NewLocation(line, col int) Location {
+	return NewLocation(line, col)
+}
+
+// OffsetLocation implements the Source interface method.
+func (s *sourceImpl) OffsetLocation(offset int32) (Location, bool) {
+	line, lineOffset := s.findLine(offset)
+	return NewLocation(int(line), int(offset-lineOffset)), true
+}
+
+// Snippet implements the Source interface method.
+func (s *sourceImpl) Snippet(line int) (string, bool) {
+	charStart, found := s.findLineOffset(line)
+	if !found || s.Len() == 0 {
+		return "", false
+	}
+	charEnd, found := s.findLineOffset(line + 1)
+	if found {
+		return s.Slice(int(charStart), int(charEnd-1)), true
+	}
+	return s.Slice(int(charStart), s.Len()), true
+}
+
+// findLineOffset returns the offset where the (1-indexed) line begins,
+// or false if the line doesn't exist.
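+//
+// For example (an editorial sketch), with lineOffsets of [3, 6] for "ab\ncd":
+//
+//	s.findLineOffset(1) // (0, true): the first line always begins at offset 0
+//	s.findLineOffset(2) // (3, true): s.lineOffsets[0]
+//	s.findLineOffset(3) // (-1, false): only lines 1 and 2 exist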
+func (s *sourceImpl) findLineOffset(line int) (int32, bool) { + if line == 1 { + return 0, true + } + if line > 1 && line <= int(len(s.lineOffsets)) { + offset := s.lineOffsets[line-2] + return offset, true + } + return -1, false +} + +// findLine finds the line that contains the given character offset and +// returns the line number and offset of the beginning of that line. +// Note that the last line is treated as if it contains all offsets +// beyond the end of the actual source. +func (s *sourceImpl) findLine(characterOffset int32) (int32, int32) { + var line int32 = 1 + for _, lineOffset := range s.lineOffsets { + if lineOffset > characterOffset { + break + } + line++ + } + if line == 1 { + return line, 0 + } + return line, s.lineOffsets[line-2] +} diff --git a/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel new file mode 100644 index 00000000..b55f4521 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/stdlib/BUILD.bazel @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "standard.go", + ], + importpath = "github.com/google/cel-go/common/stdlib", + deps = [ + "//common/decls:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + ], +) \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/common/stdlib/standard.go b/vendor/github.com/google/cel-go/common/stdlib/standard.go new file mode 100644 index 00000000..1550c178 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/stdlib/standard.go @@ -0,0 +1,620 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stdlib contains all of the standard library function declarations and definitions for CEL. 
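+//
+// A minimal usage sketch (editorial; not part of the vendored file): these
+// declarations are what a CEL environment installs by default.
+//
+//	funcs := Functions() // operators such as "_+_" with all overloads bound
+//	vars := Types()      // standard type identifiers such as int, uint, string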
+package stdlib + +import ( + "github.com/google/cel-go/common/decls" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +var ( + stdFunctions []*decls.FunctionDecl + stdTypes []*decls.VariableDecl +) + +func init() { + paramA := types.NewTypeParamType("A") + paramB := types.NewTypeParamType("B") + listOfA := types.NewListType(paramA) + mapOfAB := types.NewMapType(paramA, paramB) + + stdTypes = []*decls.VariableDecl{ + decls.TypeVariable(types.BoolType), + decls.TypeVariable(types.BytesType), + decls.TypeVariable(types.DoubleType), + decls.TypeVariable(types.DurationType), + decls.TypeVariable(types.IntType), + decls.TypeVariable(listOfA), + decls.TypeVariable(mapOfAB), + decls.TypeVariable(types.NullType), + decls.TypeVariable(types.StringType), + decls.TypeVariable(types.TimestampType), + decls.TypeVariable(types.TypeType), + decls.TypeVariable(types.UintType), + } + + stdFunctions = []*decls.FunctionDecl{ + // Logical operators. Special-cased within the interpreter. + // Note, the singleton binding prevents extensions from overriding the operator behavior. + function(operators.Conditional, + decls.Overload(overloads.Conditional, argTypes(types.BoolType, paramA, paramA), paramA, + decls.OverloadIsNonStrict()), + decls.SingletonFunctionBinding(noFunctionOverrides)), + function(operators.LogicalAnd, + decls.Overload(overloads.LogicalAnd, argTypes(types.BoolType, types.BoolType), types.BoolType, + decls.OverloadIsNonStrict()), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.LogicalOr, + decls.Overload(overloads.LogicalOr, argTypes(types.BoolType, types.BoolType), types.BoolType, + decls.OverloadIsNonStrict()), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.LogicalNot, + decls.Overload(overloads.LogicalNot, argTypes(types.BoolType), types.BoolType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + b, ok := val.(types.Bool) + if !ok { + return types.MaybeNoSuchOverloadErr(val) + } + return b.Negate() + })), + + // Comprehension short-circuiting related function + function(operators.NotStrictlyFalse, + decls.Overload(overloads.NotStrictlyFalse, argTypes(types.BoolType), types.BoolType, + decls.OverloadIsNonStrict(), + decls.UnaryBinding(notStrictlyFalse))), + // Deprecated: __not_strictly_false__ + function(operators.OldNotStrictlyFalse, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(operators.OldNotStrictlyFalse, argTypes(types.BoolType), types.BoolType, + decls.OverloadIsNonStrict(), + decls.UnaryBinding(notStrictlyFalse))), + + // Equality / inequality. 
Special-cased in the interpreter + function(operators.Equals, + decls.Overload(overloads.Equals, argTypes(paramA, paramA), types.BoolType), + decls.SingletonBinaryBinding(noBinaryOverrides)), + function(operators.NotEquals, + decls.Overload(overloads.NotEquals, argTypes(paramA, paramA), types.BoolType), + decls.SingletonBinaryBinding(noBinaryOverrides)), + + // Mathematical operators + function(operators.Add, + decls.Overload(overloads.AddBytes, + argTypes(types.BytesType, types.BytesType), types.BytesType), + decls.Overload(overloads.AddDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.AddDurationDuration, + argTypes(types.DurationType, types.DurationType), types.DurationType), + decls.Overload(overloads.AddDurationTimestamp, + argTypes(types.DurationType, types.TimestampType), types.TimestampType), + decls.Overload(overloads.AddTimestampDuration, + argTypes(types.TimestampType, types.DurationType), types.TimestampType), + decls.Overload(overloads.AddInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.AddList, + argTypes(listOfA, listOfA), listOfA), + decls.Overload(overloads.AddString, + argTypes(types.StringType, types.StringType), types.StringType), + decls.Overload(overloads.AddUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Adder).Add(rhs) + }, traits.AdderType)), + function(operators.Divide, + decls.Overload(overloads.DivideDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.DivideInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.DivideUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Divider).Divide(rhs) + }, traits.DividerType)), + function(operators.Modulo, + decls.Overload(overloads.ModuloInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.ModuloUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Modder).Modulo(rhs) + }, traits.ModderType)), + function(operators.Multiply, + decls.Overload(overloads.MultiplyDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.MultiplyInt64, + argTypes(types.IntType, types.IntType), types.IntType), + decls.Overload(overloads.MultiplyUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Multiplier).Multiply(rhs) + }, traits.MultiplierType)), + function(operators.Negate, + decls.Overload(overloads.NegateDouble, argTypes(types.DoubleType), types.DoubleType), + decls.Overload(overloads.NegateInt64, argTypes(types.IntType), types.IntType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + if types.IsBool(val) { + return types.MaybeNoSuchOverloadErr(val) + } + return val.(traits.Negater).Negate() + }, traits.NegatorType)), + function(operators.Subtract, + decls.Overload(overloads.SubtractDouble, + argTypes(types.DoubleType, types.DoubleType), types.DoubleType), + decls.Overload(overloads.SubtractDurationDuration, + argTypes(types.DurationType, types.DurationType), types.DurationType), + decls.Overload(overloads.SubtractInt64, + argTypes(types.IntType, types.IntType), 
types.IntType), + decls.Overload(overloads.SubtractTimestampDuration, + argTypes(types.TimestampType, types.DurationType), types.TimestampType), + decls.Overload(overloads.SubtractTimestampTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.DurationType), + decls.Overload(overloads.SubtractUint64, + argTypes(types.UintType, types.UintType), types.UintType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Subtractor).Subtract(rhs) + }, traits.SubtractorType)), + + // Relations operators + + function(operators.Less, + decls.Overload(overloads.LessBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.LessInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.LessInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.LessUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.LessUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.LessDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.LessDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.LessString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.LessBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.LessTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.LessDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne { + return types.True + } + if cmp == types.IntOne || cmp == types.IntZero { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.LessEquals, + decls.Overload(overloads.LessEqualsBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.LessEqualsDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.LessEqualsDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.LessEqualsString, + argTypes(types.StringType, types.StringType), 
types.BoolType), + decls.Overload(overloads.LessEqualsBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.LessEqualsTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.LessEqualsDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntOne { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.Greater, + decls.Overload(overloads.GreaterBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.GreaterInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterDoubleUint64, + argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.GreaterBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.GreaterTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.GreaterDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne { + return types.True + } + if cmp == types.IntNegOne || cmp == types.IntZero { + return types.False + } + return cmp + }, traits.ComparerType)), + + function(operators.GreaterEquals, + decls.Overload(overloads.GreaterEqualsBool, + argTypes(types.BoolType, types.BoolType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64, + argTypes(types.IntType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64Double, + argTypes(types.IntType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsInt64Uint64, + argTypes(types.IntType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64, + argTypes(types.UintType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64Double, + argTypes(types.UintType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsUint64Int64, + argTypes(types.UintType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDouble, + argTypes(types.DoubleType, types.DoubleType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDoubleInt64, + argTypes(types.DoubleType, types.IntType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDoubleUint64, 
+ argTypes(types.DoubleType, types.UintType), types.BoolType), + decls.Overload(overloads.GreaterEqualsString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.Overload(overloads.GreaterEqualsBytes, + argTypes(types.BytesType, types.BytesType), types.BoolType), + decls.Overload(overloads.GreaterEqualsTimestamp, + argTypes(types.TimestampType, types.TimestampType), types.BoolType), + decls.Overload(overloads.GreaterEqualsDuration, + argTypes(types.DurationType, types.DurationType), types.BoolType), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + cmp := lhs.(traits.Comparer).Compare(rhs) + if cmp == types.IntOne || cmp == types.IntZero { + return types.True + } + if cmp == types.IntNegOne { + return types.False + } + return cmp + }, traits.ComparerType)), + + // Indexing + function(operators.Index, + decls.Overload(overloads.IndexList, argTypes(listOfA, types.IntType), paramA), + decls.Overload(overloads.IndexMap, argTypes(mapOfAB, paramA), paramB), + decls.SingletonBinaryBinding(func(lhs, rhs ref.Val) ref.Val { + return lhs.(traits.Indexer).Get(rhs) + }, traits.IndexerType)), + + // Collections operators + function(operators.In, + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(operators.OldIn, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(overloads.DeprecatedIn, + decls.DisableDeclaration(true), // safe deprecation + decls.Overload(overloads.InList, argTypes(paramA, listOfA), types.BoolType), + decls.Overload(overloads.InMap, argTypes(paramA, mapOfAB), types.BoolType), + decls.SingletonBinaryBinding(inAggregate)), + function(overloads.Size, + decls.Overload(overloads.SizeBytes, argTypes(types.BytesType), types.IntType), + decls.MemberOverload(overloads.SizeBytesInst, argTypes(types.BytesType), types.IntType), + decls.Overload(overloads.SizeList, argTypes(listOfA), types.IntType), + decls.MemberOverload(overloads.SizeListInst, argTypes(listOfA), types.IntType), + decls.Overload(overloads.SizeMap, argTypes(mapOfAB), types.IntType), + decls.MemberOverload(overloads.SizeMapInst, argTypes(mapOfAB), types.IntType), + decls.Overload(overloads.SizeString, argTypes(types.StringType), types.IntType), + decls.MemberOverload(overloads.SizeStringInst, argTypes(types.StringType), types.IntType), + decls.SingletonUnaryBinding(func(val ref.Val) ref.Val { + return val.(traits.Sizer).Size() + }, traits.SizerType)), + + // Type conversions + function(overloads.TypeConvertType, + decls.Overload(overloads.TypeConvertType, argTypes(paramA), types.NewTypeTypeWithParam(paramA)), + decls.SingletonUnaryBinding(convertToType(types.TypeType))), + + // Bool conversions + function(overloads.TypeConvertBool, + decls.Overload(overloads.BoolToBool, argTypes(types.BoolType), types.BoolType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.StringToBool, argTypes(types.StringType), types.BoolType, + decls.UnaryBinding(convertToType(types.BoolType)))), + + // Bytes conversions + function(overloads.TypeConvertBytes, + decls.Overload(overloads.BytesToBytes, argTypes(types.BytesType), types.BytesType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.StringToBytes, 
argTypes(types.StringType), types.BytesType, + decls.UnaryBinding(convertToType(types.BytesType)))), + + // Double conversions + function(overloads.TypeConvertDouble, + decls.Overload(overloads.DoubleToDouble, argTypes(types.DoubleType), types.DoubleType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToDouble, argTypes(types.IntType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType))), + decls.Overload(overloads.StringToDouble, argTypes(types.StringType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType))), + decls.Overload(overloads.UintToDouble, argTypes(types.UintType), types.DoubleType, + decls.UnaryBinding(convertToType(types.DoubleType)))), + + // Duration conversions + function(overloads.TypeConvertDuration, + decls.Overload(overloads.DurationToDuration, argTypes(types.DurationType), types.DurationType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToDuration, argTypes(types.IntType), types.DurationType, + decls.UnaryBinding(convertToType(types.DurationType))), + decls.Overload(overloads.StringToDuration, argTypes(types.StringType), types.DurationType, + decls.UnaryBinding(convertToType(types.DurationType)))), + + // Dyn conversions + function(overloads.TypeConvertDyn, + decls.Overload(overloads.ToDyn, argTypes(paramA), types.DynType), + decls.SingletonUnaryBinding(identity)), + + // Int conversions + function(overloads.TypeConvertInt, + decls.Overload(overloads.IntToInt, argTypes(types.IntType), types.IntType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.DoubleToInt, argTypes(types.DoubleType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.DurationToInt, argTypes(types.DurationType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.StringToInt, argTypes(types.StringType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.TimestampToInt, argTypes(types.TimestampType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + decls.Overload(overloads.UintToInt, argTypes(types.UintType), types.IntType, + decls.UnaryBinding(convertToType(types.IntType))), + ), + + // String conversions + function(overloads.TypeConvertString, + decls.Overload(overloads.StringToString, argTypes(types.StringType), types.StringType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.BoolToString, argTypes(types.BoolType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.BytesToString, argTypes(types.BytesType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.DoubleToString, argTypes(types.DoubleType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.DurationToString, argTypes(types.DurationType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.IntToString, argTypes(types.IntType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.TimestampToString, argTypes(types.TimestampType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType))), + decls.Overload(overloads.UintToString, argTypes(types.UintType), types.StringType, + decls.UnaryBinding(convertToType(types.StringType)))), + + // Timestamp conversions + function(overloads.TypeConvertTimestamp, + 
decls.Overload(overloads.TimestampToTimestamp, argTypes(types.TimestampType), types.TimestampType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.IntToTimestamp, argTypes(types.IntType), types.TimestampType, + decls.UnaryBinding(convertToType(types.TimestampType))), + decls.Overload(overloads.StringToTimestamp, argTypes(types.StringType), types.TimestampType, + decls.UnaryBinding(convertToType(types.TimestampType)))), + + // Uint conversions + function(overloads.TypeConvertUint, + decls.Overload(overloads.UintToUint, argTypes(types.UintType), types.UintType, + decls.UnaryBinding(identity)), + decls.Overload(overloads.DoubleToUint, argTypes(types.DoubleType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType))), + decls.Overload(overloads.IntToUint, argTypes(types.IntType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType))), + decls.Overload(overloads.StringToUint, argTypes(types.StringType), types.UintType, + decls.UnaryBinding(convertToType(types.UintType)))), + + // String functions + function(overloads.Contains, + decls.MemberOverload(overloads.ContainsString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringContains)), + decls.DisableTypeGuards(true)), + function(overloads.EndsWith, + decls.MemberOverload(overloads.EndsWithString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringEndsWith)), + decls.DisableTypeGuards(true)), + function(overloads.StartsWith, + decls.MemberOverload(overloads.StartsWithString, + argTypes(types.StringType, types.StringType), types.BoolType, + decls.BinaryBinding(types.StringStartsWith)), + decls.DisableTypeGuards(true)), + function(overloads.Matches, + decls.Overload(overloads.Matches, argTypes(types.StringType, types.StringType), types.BoolType), + decls.MemberOverload(overloads.MatchesString, + argTypes(types.StringType, types.StringType), types.BoolType), + decls.SingletonBinaryBinding(func(str, pat ref.Val) ref.Val { + return str.(traits.Matcher).Match(pat) + }, traits.MatcherType)), + + // Timestamp / duration functions + function(overloads.TimeGetFullYear, + decls.MemberOverload(overloads.TimestampToYear, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToYearWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetMonth, + decls.MemberOverload(overloads.TimestampToMonth, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMonthWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDayOfYear, + decls.MemberOverload(overloads.TimestampToDayOfYear, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfYearWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDayOfMonth, + decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBased, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfMonthZeroBasedWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetDate, + decls.MemberOverload(overloads.TimestampToDayOfMonthOneBased, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfMonthOneBasedWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + 
function(overloads.TimeGetDayOfWeek, + decls.MemberOverload(overloads.TimestampToDayOfWeek, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToDayOfWeekWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType)), + + function(overloads.TimeGetHours, + decls.MemberOverload(overloads.TimestampToHours, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToHoursWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToHours, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetMinutes, + decls.MemberOverload(overloads.TimestampToMinutes, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMinutesWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToMinutes, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetSeconds, + decls.MemberOverload(overloads.TimestampToSeconds, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToSecondsWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToSeconds, + argTypes(types.DurationType), types.IntType)), + + function(overloads.TimeGetMilliseconds, + decls.MemberOverload(overloads.TimestampToMilliseconds, + argTypes(types.TimestampType), types.IntType), + decls.MemberOverload(overloads.TimestampToMillisecondsWithTz, + argTypes(types.TimestampType, types.StringType), types.IntType), + decls.MemberOverload(overloads.DurationToMilliseconds, + argTypes(types.DurationType), types.IntType)), + } +} + +// Functions returns the set of standard library function declarations and definitions for CEL. +func Functions() []*decls.FunctionDecl { + return stdFunctions +} + +// Types returns the set of standard library types for CEL. +func Types() []*decls.VariableDecl { + return stdTypes +} + +func notStrictlyFalse(value ref.Val) ref.Val { + if types.IsBool(value) { + return value + } + return types.True +} + +func inAggregate(lhs ref.Val, rhs ref.Val) ref.Val { + if rhs.Type().HasTrait(traits.ContainerType) { + return rhs.(traits.Container).Contains(lhs) + } + return types.ValOrErr(rhs, "no such overload") +} + +func function(name string, opts ...decls.FunctionOpt) *decls.FunctionDecl { + fn, err := decls.NewFunction(name, opts...) 
+ if err != nil { + panic(err) + } + return fn +} + +func argTypes(args ...*types.Type) []*types.Type { + return args +} + +func noBinaryOverrides(rhs, lhs ref.Val) ref.Val { + return types.NoSuchOverloadErr() +} + +func noFunctionOverrides(args ...ref.Val) ref.Val { + return types.NoSuchOverloadErr() +} + +func identity(val ref.Val) ref.Val { + return val +} + +func convertToType(t ref.Type) functions.UnaryOp { + return func(val ref.Val) ref.Val { + return val.ConvertToType(t) + } +} diff --git a/vendor/github.com/google/cel-go/common/types/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/BUILD.bazel new file mode 100644 index 00000000..b5e44ffb --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/BUILD.bazel @@ -0,0 +1,90 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "any_value.go", + "bool.go", + "bytes.go", + "compare.go", + "double.go", + "duration.go", + "err.go", + "int.go", + "iterator.go", + "json_value.go", + "list.go", + "map.go", + "null.go", + "object.go", + "optional.go", + "overflow.go", + "provider.go", + "string.go", + "timestamp.go", + "types.go", + "uint.go", + "unknown.go", + "util.go", + ], + importpath = "github.com/google/cel-go/common/types", + deps = [ + "//checker/decls:go_default_library", + "//common/overloads:go_default_library", + "//common/types/pb:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@com_github_stoewer_go_strcase//:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "bool_test.go", + "bytes_test.go", + "double_test.go", + "duration_test.go", + "int_test.go", + "json_list_test.go", + "json_struct_test.go", + "list_test.go", + "map_test.go", + "null_test.go", + "object_test.go", + "optional_test.go", + "provider_test.go", + "string_test.go", + "timestamp_test.go", + "types_test.go", + "uint_test.go", + "unknown_test.go", + "util_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//common/types/ref:go_default_library", + "//test:go_default_library", + "//test/proto3pb:test_all_types_go_proto", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//encoding/protojson:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/types/any_value.go b/vendor/github.com/google/cel-go/common/types/any_value.go new file mode 100644 index 00000000..cda0f13a --- /dev/null +++ 
b/vendor/github.com/google/cel-go/common/types/any_value.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + + anypb "google.golang.org/protobuf/types/known/anypb" +) + +// anyValueType constant representing the reflected type of google.protobuf.Any. +var anyValueType = reflect.TypeOf(&anypb.Any{}) diff --git a/vendor/github.com/google/cel-go/common/types/bool.go b/vendor/github.com/google/cel-go/common/types/bool.go new file mode 100644 index 00000000..565734f3 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/bool.go @@ -0,0 +1,141 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strconv" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Bool type that implements ref.Val and supports comparison and negation. +type Bool bool + +var ( + // boolWrapperType golang reflected type for protobuf bool wrapper type. + boolWrapperType = reflect.TypeOf(&wrapperspb.BoolValue{}) +) + +// Boolean constants +const ( + False = Bool(false) + True = Bool(true) +) + +// Compare implements the traits.Comparer interface method. +func (b Bool) Compare(other ref.Val) ref.Val { + otherBool, ok := other.(Bool) + if !ok { + return ValOrErr(other, "no such overload") + } + if b == otherBool { + return IntZero + } + if !b && otherBool { + return IntNegOne + } + return IntOne +} + +// ConvertToNative implements the ref.Val interface method. +func (b Bool) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Bool: + return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped to a wrapperspb.BoolValue before being packed into an Any. + return anypb.New(wrapperspb.Bool(bool(b))) + case boolWrapperType: + // Convert the bool to a wrapperspb.BoolValue. + return wrapperspb.Bool(bool(b)), nil + case jsonValueType: + // Return the bool as a new structpb.Value. 
+ return structpb.NewBoolValue(bool(b)), nil + default: + if typeDesc.Elem().Kind() == reflect.Bool { + p := bool(b) + return &p, nil + } + } + case reflect.Interface: + bv := b.Value() + if reflect.TypeOf(bv).Implements(typeDesc) { + return bv, nil + } + if reflect.TypeOf(b).Implements(typeDesc) { + return b, nil + } + } + return nil, fmt.Errorf("type conversion error from bool to '%v'", typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (b Bool) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + return String(strconv.FormatBool(bool(b))) + case BoolType: + return b + case TypeType: + return BoolType + } + return NewErr("type conversion error from '%v' to '%v'", BoolType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (b Bool) Equal(other ref.Val) ref.Val { + otherBool, ok := other.(Bool) + return Bool(ok && b == otherBool) +} + +// IsZeroValue returns true if the boolean value is false. +func (b Bool) IsZeroValue() bool { + return b == False +} + +// Negate implements the traits.Negater interface method. +func (b Bool) Negate() ref.Val { + return !b +} + +// Type implements the ref.Val interface method. +func (b Bool) Type() ref.Type { + return BoolType +} + +// Value implements the ref.Val interface method. +func (b Bool) Value() any { + return bool(b) +} + +// IsBool returns whether the input ref.Val or ref.Type is equal to BoolType. +func IsBool(elem ref.Val) bool { + switch v := elem.(type) { + case Bool: + return true + case ref.Val: + return v.Type() == BoolType + default: + return false + } +} diff --git a/vendor/github.com/google/cel-go/common/types/bytes.go b/vendor/github.com/google/cel-go/common/types/bytes.go new file mode 100644 index 00000000..7e813e29 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/bytes.go @@ -0,0 +1,140 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "bytes" + "encoding/base64" + "fmt" + "reflect" + "unicode/utf8" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Bytes type that implements ref.Val and supports add, compare, and size +// operations. +type Bytes []byte + +var ( + // byteWrapperType golang reflected type for protobuf bytes wrapper type. + byteWrapperType = reflect.TypeOf(&wrapperspb.BytesValue{}) +) + +// Add implements traits.Adder interface method by concatenating byte sequences. +func (b Bytes) Add(other ref.Val) ref.Val { + otherBytes, ok := other.(Bytes) + if !ok { + return ValOrErr(other, "no such overload") + } + return append(b, otherBytes...) +} + +// Compare implements traits.Comparer interface method by lexicographic ordering. 
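+//
+// For example (an editorial sketch):
+//
+//	Bytes("ab").Compare(Bytes("ac")) // IntNegOne
+//	Bytes("ab").Compare(Bytes("ab")) // IntZero
+//	Bytes("ab").Compare(String("x")) // error: no such overload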
+func (b Bytes) Compare(other ref.Val) ref.Val { + otherBytes, ok := other.(Bytes) + if !ok { + return ValOrErr(other, "no such overload") + } + return Int(bytes.Compare(b, otherBytes)) +} + +// ConvertToNative implements the ref.Val interface method. +func (b Bytes) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Array: + if len(b) != typeDesc.Len() { + return nil, fmt.Errorf("[%d]byte not assignable to [%d]byte array", len(b), typeDesc.Len()) + } + refArrPtr := reflect.New(reflect.ArrayOf(len(b), typeDesc.Elem())) + refArr := refArrPtr.Elem() + for i, byt := range b { + refArr.Index(i).Set(reflect.ValueOf(byt).Convert(typeDesc.Elem())) + } + return refArr.Interface(), nil + case reflect.Slice: + return reflect.ValueOf(b).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.Bytes([]byte(b))) + case byteWrapperType: + // Convert the bytes to a wrapperspb.BytesValue. + return wrapperspb.Bytes([]byte(b)), nil + case jsonValueType: + // CEL follows the proto3 to JSON conversion by encoding bytes to a string via base64. + // The encoding below matches the golang 'encoding/json' behavior during marshaling, + // which uses base64.StdEncoding. + str := base64.StdEncoding.EncodeToString([]byte(b)) + return structpb.NewStringValue(str), nil + } + case reflect.Interface: + bv := b.Value() + if reflect.TypeOf(bv).Implements(typeDesc) { + return bv, nil + } + if reflect.TypeOf(b).Implements(typeDesc) { + return b, nil + } + } + return nil, fmt.Errorf("type conversion error from Bytes to '%v'", typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (b Bytes) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + if !utf8.Valid(b) { + return NewErr("invalid UTF-8 in bytes, cannot convert to string") + } + return String(b) + case BytesType: + return b + case TypeType: + return BytesType + } + return NewErr("type conversion error from '%s' to '%s'", BytesType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (b Bytes) Equal(other ref.Val) ref.Val { + otherBytes, ok := other.(Bytes) + return Bool(ok && bytes.Equal(b, otherBytes)) +} + +// IsZeroValue returns true if the byte array is empty. +func (b Bytes) IsZeroValue() bool { + return len(b) == 0 +} + +// Size implements the traits.Sizer interface method. +func (b Bytes) Size() ref.Val { + return Int(len(b)) +} + +// Type implements the ref.Val interface method. +func (b Bytes) Type() ref.Type { + return BytesType +} + +// Value implements the ref.Val interface method. +func (b Bytes) Value() any { + return []byte(b) +} diff --git a/vendor/github.com/google/cel-go/common/types/compare.go b/vendor/github.com/google/cel-go/common/types/compare.go new file mode 100644 index 00000000..e1968261 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/compare.go @@ -0,0 +1,97 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "math" + + "github.com/google/cel-go/common/types/ref" +) + +func compareDoubleInt(d Double, i Int) Int { + if d < math.MinInt64 { + return IntNegOne + } + if d > math.MaxInt64 { + return IntOne + } + return compareDouble(d, Double(i)) +} + +func compareIntDouble(i Int, d Double) Int { + return -compareDoubleInt(d, i) +} + +func compareDoubleUint(d Double, u Uint) Int { + if d < 0 { + return IntNegOne + } + if d > math.MaxUint64 { + return IntOne + } + return compareDouble(d, Double(u)) +} + +func compareUintDouble(u Uint, d Double) Int { + return -compareDoubleUint(d, u) +} + +func compareIntUint(i Int, u Uint) Int { + if i < 0 || u > math.MaxInt64 { + return IntNegOne + } + cmp := i - Int(u) + if cmp < 0 { + return IntNegOne + } + if cmp > 0 { + return IntOne + } + return IntZero +} + +func compareUintInt(u Uint, i Int) Int { + return -compareIntUint(i, u) +} + +func compareDouble(a, b Double) Int { + if a < b { + return IntNegOne + } + if a > b { + return IntOne + } + return IntZero +} + +func compareInt(a, b Int) ref.Val { + if a < b { + return IntNegOne + } + if a > b { + return IntOne + } + return IntZero +} + +func compareUint(a, b Uint) ref.Val { + if a < b { + return IntNegOne + } + if a > b { + return IntOne + } + return IntZero +} diff --git a/vendor/github.com/google/cel-go/common/types/doc.go b/vendor/github.com/google/cel-go/common/types/doc.go new file mode 100644 index 00000000..5f641d70 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types contains the types, traits, and utilities common to all +// components of expression handling. +package types diff --git a/vendor/github.com/google/cel-go/common/types/double.go b/vendor/github.com/google/cel-go/common/types/double.go new file mode 100644 index 00000000..027e7897 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/double.go @@ -0,0 +1,211 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "fmt" + "math" + "reflect" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Double type that implements ref.Val, comparison, and mathematical +// operations. +type Double float64 + +var ( + // doubleWrapperType reflected type for protobuf double wrapper type. + doubleWrapperType = reflect.TypeOf(&wrapperspb.DoubleValue{}) + + // floatWrapperType reflected type for protobuf float wrapper type. + floatWrapperType = reflect.TypeOf(&wrapperspb.FloatValue{}) +) + +// Add implements traits.Adder.Add. +func (d Double) Add(other ref.Val) ref.Val { + otherDouble, ok := other.(Double) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return d + otherDouble +} + +// Compare implements traits.Comparer.Compare. +func (d Double) Compare(other ref.Val) ref.Val { + if math.IsNaN(float64(d)) { + return NewErr("NaN values cannot be ordered") + } + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return NewErr("NaN values cannot be ordered") + } + return compareDouble(d, ov) + case Int: + return compareDoubleInt(d, ov) + case Uint: + return compareDoubleUint(d, ov) + default: + return MaybeNoSuchOverloadErr(other) + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (d Double) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Float32: + v := float32(d) + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Float64: + v := float64(d) + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.Double(float64(d))) + case doubleWrapperType: + // Convert to a wrapperspb.DoubleValue + return wrapperspb.Double(float64(d)), nil + case floatWrapperType: + // Convert to a wrapperspb.FloatValue (with truncation). + return wrapperspb.Float(float32(d)), nil + case jsonValueType: + // Note, there are special cases for proto3 to json conversion that + // expect the floating point value to be converted to a NaN, + // Infinity, or -Infinity string values, but the jsonpb string + // marshaling of the protobuf.Value will handle this conversion. + return structpb.NewNumberValue(float64(d)), nil + } + switch typeDesc.Elem().Kind() { + case reflect.Float32: + v := float32(d) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + case reflect.Float64: + v := float64(d) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + } + case reflect.Interface: + dv := d.Value() + if reflect.TypeOf(dv).Implements(typeDesc) { + return dv, nil + } + if reflect.TypeOf(d).Implements(typeDesc) { + return d, nil + } + } + return nil, fmt.Errorf("type conversion error from Double to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
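+//
+// For example (an editorial sketch; doubleToInt64Checked guards the int64 range):
+//
+//	Double(2.0).ConvertToType(IntType)    // Int(2)
+//	Double(1e100).ConvertToType(IntType)  // range error, wrapped via WrapErr
+//	Double(2.5).ConvertToType(StringType) // String("2.5"), formatted with %g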
+func (d Double) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + i, err := doubleToInt64Checked(float64(d)) + if err != nil { + return WrapErr(err) + } + return Int(i) + case UintType: + i, err := doubleToUint64Checked(float64(d)) + if err != nil { + return WrapErr(err) + } + return Uint(i) + case DoubleType: + return d + case StringType: + return String(fmt.Sprintf("%g", float64(d))) + case TypeType: + return DoubleType + } + return NewErr("type conversion error from '%s' to '%s'", DoubleType, typeVal) +} + +// Divide implements traits.Divider.Divide. +func (d Double) Divide(other ref.Val) ref.Val { + otherDouble, ok := other.(Double) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return d / otherDouble +} + +// Equal implements ref.Val.Equal. +func (d Double) Equal(other ref.Val) ref.Val { + if math.IsNaN(float64(d)) { + return False + } + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return False + } + return Bool(d == ov) + case Int: + return Bool(compareDoubleInt(d, ov) == 0) + case Uint: + return Bool(compareDoubleUint(d, ov) == 0) + default: + return False + } +} + +// IsZeroValue returns true if double value is 0.0 +func (d Double) IsZeroValue() bool { + return float64(d) == 0.0 +} + +// Multiply implements traits.Multiplier.Multiply. +func (d Double) Multiply(other ref.Val) ref.Val { + otherDouble, ok := other.(Double) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return d * otherDouble +} + +// Negate implements traits.Negater.Negate. +func (d Double) Negate() ref.Val { + return -d +} + +// Subtract implements traits.Subtractor.Subtract. +func (d Double) Subtract(subtrahend ref.Val) ref.Val { + subtraDouble, ok := subtrahend.(Double) + if !ok { + return MaybeNoSuchOverloadErr(subtrahend) + } + return d - subtraDouble +} + +// Type implements ref.Val.Type. +func (d Double) Type() ref.Type { + return DoubleType +} + +// Value implements ref.Val.Value. +func (d Double) Value() any { + return float64(d) +} diff --git a/vendor/github.com/google/cel-go/common/types/duration.go b/vendor/github.com/google/cel-go/common/types/duration.go new file mode 100644 index 00000000..596e56d6 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/duration.go @@ -0,0 +1,222 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strconv" + "time" + + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + dpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// Duration type that implements ref.Val and supports add, compare, negate, +// and subtract operators. This type is also a receiver which means it can +// participate in dispatch to receiver functions. 
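Note the asymmetry the Double methods above encode: ordering against NaN is an error, while equality with NaN is simply false, matching IEEE-754. A minimal sketch, assuming the vendored cel-go module is importable:

    package main

    import (
    	"fmt"
    	"math"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	nan := types.Double(math.NaN())

    	// Equality: NaN equals nothing, including another NaN.
    	fmt.Println(nan.Equal(types.Double(math.NaN()))) // false

    	// Ordering: comparing against NaN yields an error value.
    	fmt.Println(types.IsError(nan.Compare(types.Double(1)))) // true
    }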
+type Duration struct { + time.Duration +} + +func durationOf(d time.Duration) Duration { + return Duration{Duration: d} +} + +var ( + durationValueType = reflect.TypeOf(&dpb.Duration{}) + + durationZeroArgOverloads = map[string]func(ref.Val) ref.Val{ + overloads.TimeGetHours: DurationGetHours, + overloads.TimeGetMinutes: DurationGetMinutes, + overloads.TimeGetSeconds: DurationGetSeconds, + overloads.TimeGetMilliseconds: DurationGetMilliseconds, + } +) + +// Add implements traits.Adder.Add. +func (d Duration) Add(other ref.Val) ref.Val { + switch other.Type() { + case DurationType: + dur2 := other.(Duration) + val, err := addDurationChecked(d.Duration, dur2.Duration) + if err != nil { + return WrapErr(err) + } + return durationOf(val) + case TimestampType: + ts := other.(Timestamp).Time + val, err := addTimeDurationChecked(ts, d.Duration) + if err != nil { + return WrapErr(err) + } + return timestampOf(val) + } + return MaybeNoSuchOverloadErr(other) +} + +// Compare implements traits.Comparer.Compare. +func (d Duration) Compare(other ref.Val) ref.Val { + otherDur, ok := other.(Duration) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + d1 := d.Duration + d2 := otherDur.Duration + switch { + case d1 < d2: + return IntNegOne + case d1 > d2: + return IntOne + default: + return IntZero + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (d Duration) ConvertToNative(typeDesc reflect.Type) (any, error) { + // If the duration is already assignable to the desired type return it. + if reflect.TypeOf(d.Duration).AssignableTo(typeDesc) { + return d.Duration, nil + } + if reflect.TypeOf(d).AssignableTo(typeDesc) { + return d, nil + } + switch typeDesc { + case anyValueType: + // Pack the duration as a dpb.Duration into an Any value. + return anypb.New(dpb.New(d.Duration)) + case durationValueType: + // Unwrap the CEL value to its underlying proto value. + return dpb.New(d.Duration), nil + case jsonValueType: + // CEL follows the proto3 to JSON conversion. + // Note, using jsonpb would wrap the result in extra double quotes. + v := d.ConvertToType(StringType) + if IsError(v) { + return nil, v.(*Err) + } + return structpb.NewStringValue(string(v.(String))), nil + } + return nil, fmt.Errorf("type conversion error from 'Duration' to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (d Duration) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + return String(strconv.FormatFloat(d.Seconds(), 'f', -1, 64) + "s") + case IntType: + return Int(d.Duration) + case DurationType: + return d + case TypeType: + return DurationType + } + return NewErr("type conversion error from '%s' to '%s'", DurationType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (d Duration) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(Duration) + return Bool(ok && d.Duration == otherDur.Duration) +} + +// IsZeroValue returns true if the duration value is zero +func (d Duration) IsZeroValue() bool { + return d.Duration == 0 +} + +// Negate implements traits.Negater.Negate. +func (d Duration) Negate() ref.Val { + val, err := negateDurationChecked(d.Duration) + if err != nil { + return WrapErr(err) + } + return durationOf(val) +} + +// Receive implements traits.Receiver.Receive. 
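The checked helpers used above (addDurationChecked and friends) surface overflow as CEL error values rather than wrapping around. A small sketch of how that behaves through the public Duration type, assuming the vendored module resolves (values are illustrative):

    package main

    import (
    	"fmt"
    	"math"
    	"time"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	max := types.Duration{Duration: time.Duration(math.MaxInt64)}
    	one := types.Duration{Duration: time.Second}

    	// Overflow is reported as an error value, not a wrapped result.
    	fmt.Println(types.IsError(max.Add(one))) // true

    	// In-range arithmetic returns a Duration as usual.
    	fmt.Println(one.Add(one).(types.Duration).Duration) // 2s
    }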
+func (d Duration) Receive(function string, overload string, args []ref.Val) ref.Val {
+	if len(args) == 0 {
+		if f, found := durationZeroArgOverloads[function]; found {
+			return f(d)
+		}
+	}
+	return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (d Duration) Subtract(subtrahend ref.Val) ref.Val {
+	subtraDur, ok := subtrahend.(Duration)
+	if !ok {
+		return MaybeNoSuchOverloadErr(subtrahend)
+	}
+	val, err := subtractDurationChecked(d.Duration, subtraDur.Duration)
+	if err != nil {
+		return WrapErr(err)
+	}
+	return durationOf(val)
+}
+
+// Type implements ref.Val.Type.
+func (d Duration) Type() ref.Type {
+	return DurationType
+}
+
+// Value implements ref.Val.Value.
+func (d Duration) Value() any {
+	return d.Duration
+}
+
+// DurationGetHours returns the duration in hours.
+func DurationGetHours(val ref.Val) ref.Val {
+	dur, ok := val.(Duration)
+	if !ok {
+		return MaybeNoSuchOverloadErr(val)
+	}
+	return Int(dur.Hours())
+}
+
+// DurationGetMinutes returns the duration in minutes.
+func DurationGetMinutes(val ref.Val) ref.Val {
+	dur, ok := val.(Duration)
+	if !ok {
+		return MaybeNoSuchOverloadErr(val)
+	}
+	return Int(dur.Minutes())
+}
+
+// DurationGetSeconds returns the duration in seconds.
+func DurationGetSeconds(val ref.Val) ref.Val {
+	dur, ok := val.(Duration)
+	if !ok {
+		return MaybeNoSuchOverloadErr(val)
+	}
+	return Int(dur.Seconds())
+}
+
+// DurationGetMilliseconds returns the duration in milliseconds.
+func DurationGetMilliseconds(val ref.Val) ref.Val {
+	dur, ok := val.(Duration)
+	if !ok {
+		return MaybeNoSuchOverloadErr(val)
+	}
+	return Int(dur.Milliseconds())
+}
diff --git a/vendor/github.com/google/cel-go/common/types/err.go b/vendor/github.com/google/cel-go/common/types/err.go
new file mode 100644
index 00000000..9c9d9e21
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/err.go
@@ -0,0 +1,169 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Error interface which allows types.Err values to be treated as error values.
+type Error interface {
+	error
+	ref.Val
+}
+
+// Err type which extends the built-in Go error and implements ref.Val.
+type Err struct {
+	error
+	id int64
+}
+
+var (
+	// ErrType singleton.
+	ErrType = NewOpaqueType("error")
+
+	// errDivideByZero is an error indicating a division by zero of an integer value.
+	errDivideByZero = errors.New("division by zero")
+	// errModulusByZero is an error indicating a modulus by zero of an integer value.
+	errModulusByZero = errors.New("modulus by zero")
+	// errIntOverflow is an error representing integer overflow.
+	errIntOverflow = errors.New("integer overflow")
+	// errUintOverflow is an error representing unsigned integer overflow.
+	errUintOverflow = errors.New("unsigned integer overflow")
+	// errDurationOverflow is an error representing duration overflow.
+	errDurationOverflow = errors.New("duration overflow")
+	// errTimestampOverflow is an error representing timestamp overflow.
+	errTimestampOverflow    = errors.New("timestamp overflow")
+	celErrTimestampOverflow = &Err{error: errTimestampOverflow}
+
+	// celErrNoSuchOverload indicates that the call arguments did not match a supported method signature.
+	celErrNoSuchOverload = NewErr("no such overload")
+)
+
+// NewErr creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErr(format string, args ...any) ref.Val {
+	return &Err{error: fmt.Errorf(format, args...)}
+}
+
+// NewErrWithNodeID creates a new Err described by the format string and args.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func NewErrWithNodeID(id int64, format string, args ...any) ref.Val {
+	return &Err{error: fmt.Errorf(format, args...), id: id}
+}
+
+// LabelErrNode returns val unaltered if it is not an Err, or if the error already
+// carries a non-zero AST node ID. Otherwise the id is added to the error for
+// recovery with the Err.NodeID method.
+func LabelErrNode(id int64, val ref.Val) ref.Val {
+	if err, ok := val.(*Err); ok && err.id == 0 {
+		err.id = id
+		return err
+	}
+	return val
+}
+
+// NoSuchOverloadErr returns a types.Err instance with a no such overload message.
+func NoSuchOverloadErr() ref.Val {
+	return celErrNoSuchOverload
+}
+
+// UnsupportedRefValConversionErr returns a types.Err instance with a message indicating
+// that the native value could not be converted to a CEL ref.Val.
+func UnsupportedRefValConversionErr(val any) ref.Val {
+	return NewErr("unsupported conversion to ref.Val: (%T)%v", val, val)
+}
+
+// MaybeNoSuchOverloadErr returns the input val if it is an error or unknown value,
+// otherwise a new no such overload error.
+func MaybeNoSuchOverloadErr(val ref.Val) ref.Val {
+	return ValOrErr(val, "no such overload")
+}
+
+// ValOrErr either returns the existing error or creates a new one.
+// TODO: Audit the use of this function and standardize the error messages and codes.
+func ValOrErr(val ref.Val, format string, args ...any) ref.Val {
+	if val == nil || !IsUnknownOrError(val) {
+		return NewErr(format, args...)
+	}
+	return val
+}
+
+// WrapErr wraps an existing Go error value into a CEL Err value.
+func WrapErr(err error) ref.Val {
+	return &Err{error: err}
+}
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (e *Err) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	return nil, e.error
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (e *Err) ConvertToType(typeVal ref.Type) ref.Val {
+	// Errors are not convertible to other representations.
+	return e
+}
+
+// Equal implements ref.Val.Equal.
+func (e *Err) Equal(other ref.Val) ref.Val {
+	// An error cannot be equal to any other value, so it returns itself.
+	return e
+}
+
+// String implements fmt.Stringer.
+func (e *Err) String() string {
+	return e.error.Error()
+}
+
+// Type implements ref.Val.Type.
+func (e *Err) Type() ref.Type {
+	return ErrType
+}
+
+// Value implements ref.Val.Value.
+func (e *Err) Value() any {
+	return e.error
+}
+
+// NodeID returns the AST node ID of the expression that returned the error.
+func (e *Err) NodeID() int64 {
+	return e.id
+}
+
+// Is implements errors.Is.
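Because *Err embeds a Go error and, per the methods that follow, implements Is and Unwrap, values produced by NewErr and WrapErr interoperate with the standard errors package. A brief sketch, assuming the vendored module resolves:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	sentinel := errors.New("quota exceeded")

    	// WrapErr lifts an ordinary Go error into a CEL ref.Val.
    	v := types.WrapErr(sentinel)

    	// The wrapper still cooperates with errors.Is via Is/Unwrap.
    	if e, ok := v.(error); ok {
    		fmt.Println(errors.Is(e, sentinel)) // true
    	}

    	// NewErr formats a fresh CEL error value.
    	fmt.Println(types.NewErr("no such attribute %q", "spec.size"))
    }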
+func (e *Err) Is(target error) bool { + return e.error.Error() == target.Error() +} + +// Unwrap implements errors.Unwrap. +func (e *Err) Unwrap() error { + return e.error +} + +// IsError returns whether the input element ref.Type or ref.Val is equal to +// the ErrType singleton. +func IsError(val ref.Val) bool { + switch val.(type) { + case *Err: + return true + default: + return false + } +} diff --git a/vendor/github.com/google/cel-go/common/types/int.go b/vendor/github.com/google/cel-go/common/types/int.go new file mode 100644 index 00000000..0ae9507c --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/int.go @@ -0,0 +1,303 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "math" + "reflect" + "strconv" + "time" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Int type that implements ref.Val as well as comparison and math operators. +type Int int64 + +// Int constants used for comparison results. +const ( + // IntZero is the zero-value for Int + IntZero = Int(0) + IntOne = Int(1) + IntNegOne = Int(-1) +) + +var ( + // int32WrapperType reflected type for protobuf int32 wrapper type. + int32WrapperType = reflect.TypeOf(&wrapperspb.Int32Value{}) + + // int64WrapperType reflected type for protobuf int64 wrapper type. + int64WrapperType = reflect.TypeOf(&wrapperspb.Int64Value{}) +) + +// Add implements traits.Adder.Add. +func (i Int) Add(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := addInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Compare implements traits.Comparer.Compare. +func (i Int) Compare(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return NewErr("NaN values cannot be ordered") + } + return compareIntDouble(i, ov) + case Int: + return compareInt(i, ov) + case Uint: + return compareIntUint(i, ov) + default: + return MaybeNoSuchOverloadErr(other) + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (i Int) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Int, reflect.Int32: + // Enums are also mapped as int32 derivations. + // Note, the code doesn't convert to the enum value directly since this is not known, but + // the net effect with respect to proto-assignment is handled correctly by the reflection + // Convert method. 
+ v, err := int64ToInt32Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int8: + v, err := int64ToInt8Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int16: + v, err := int64ToInt16Checked(int64(i)) + if err != nil { + return nil, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Int64: + return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.Int64(int64(i))) + case int32WrapperType: + // Convert the value to a wrapperspb.Int32Value, error on overflow. + v, err := int64ToInt32Checked(int64(i)) + if err != nil { + return nil, err + } + return wrapperspb.Int32(v), nil + case int64WrapperType: + // Convert the value to a wrapperspb.Int64Value. + return wrapperspb.Int64(int64(i)), nil + case jsonValueType: + // The proto-to-JSON conversion rules would convert all 64-bit integer values to JSON + // decimal strings. Because CEL ints might come from the automatic widening of 32-bit + // values in protos, the JSON type is chosen dynamically based on the value. + // + // - Integers -2^53-1 < n < 2^53-1 are encoded as JSON numbers. + // - Integers outside this range are encoded as JSON strings. + // + // The integer to float range represents the largest interval where such a conversion + // can round-trip accurately. Thus, conversions from a 32-bit source can expect a JSON + // number as with protobuf. Those consuming JSON from a 64-bit source must be able to + // handle either a JSON number or a JSON decimal string. To handle these cases safely + // the string values must be explicitly converted to int() within a CEL expression; + // however, it is best to simply stay within the JSON number range when building JSON + // objects in CEL. + if i.isJSONSafe() { + return structpb.NewNumberValue(float64(i)), nil + } + // Proto3 to JSON conversion requires string-formatted int64 values + // since the conversion to floating point would result in truncation. + return structpb.NewStringValue(strconv.FormatInt(int64(i), 10)), nil + } + switch typeDesc.Elem().Kind() { + case reflect.Int32: + // Convert the value to a wrapperspb.Int32Value, error on overflow. + v, err := int64ToInt32Checked(int64(i)) + if err != nil { + return nil, err + } + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + case reflect.Int64: + v := int64(i) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + } + case reflect.Interface: + iv := i.Value() + if reflect.TypeOf(iv).Implements(typeDesc) { + return iv, nil + } + if reflect.TypeOf(i).Implements(typeDesc) { + return i, nil + } + } + return nil, fmt.Errorf("unsupported type conversion from 'int' to %v", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
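The JSON-number cutoff in the comment above comes from float64 precision: past 2^53-1 (Number.MAX_SAFE_INTEGER) adjacent integers collide when widened to a double, which is why larger values are emitted as decimal strings. A standalone illustration in plain Go:

    package main

    import "fmt"

    // maxSafeJSON mirrors Number.MAX_SAFE_INTEGER from EcmaScript 6.
    const maxSafeJSON = 1<<53 - 1

    func main() {
    	a := int64(maxSafeJSON) + 1
    	b := a + 1
    	// Two distinct int64 values become indistinguishable as float64,
    	// so a JSON number could no longer round-trip them.
    	fmt.Println(float64(a) == float64(b)) // true
    }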
+func (i Int) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + return i + case UintType: + u, err := int64ToUint64Checked(int64(i)) + if err != nil { + return WrapErr(err) + } + return Uint(u) + case DoubleType: + return Double(i) + case StringType: + return String(fmt.Sprintf("%d", int64(i))) + case TimestampType: + // The maximum positive value that can be passed to time.Unix is math.MaxInt64 minus the number + // of seconds between year 1 and year 1970. See comments on unixToInternal. + if int64(i) < minUnixTime || int64(i) > maxUnixTime { + return celErrTimestampOverflow + } + return timestampOf(time.Unix(int64(i), 0).UTC()) + case TypeType: + return IntType + } + return NewErr("type conversion error from '%s' to '%s'", IntType, typeVal) +} + +// Divide implements traits.Divider.Divide. +func (i Int) Divide(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := divideInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Equal implements ref.Val.Equal. +func (i Int) Equal(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return False + } + return Bool(compareIntDouble(i, ov) == 0) + case Int: + return Bool(i == ov) + case Uint: + return Bool(compareIntUint(i, ov) == 0) + default: + return False + } +} + +// IsZeroValue returns true if integer is equal to 0 +func (i Int) IsZeroValue() bool { + return i == IntZero +} + +// Modulo implements traits.Modder.Modulo. +func (i Int) Modulo(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := moduloInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Multiply implements traits.Multiplier.Multiply. +func (i Int) Multiply(other ref.Val) ref.Val { + otherInt, ok := other.(Int) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := multiplyInt64Checked(int64(i), int64(otherInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Negate implements traits.Negater.Negate. +func (i Int) Negate() ref.Val { + val, err := negateInt64Checked(int64(i)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Subtract implements traits.Subtractor.Subtract. +func (i Int) Subtract(subtrahend ref.Val) ref.Val { + subtraInt, ok := subtrahend.(Int) + if !ok { + return MaybeNoSuchOverloadErr(subtrahend) + } + val, err := subtractInt64Checked(int64(i), int64(subtraInt)) + if err != nil { + return WrapErr(err) + } + return Int(val) +} + +// Type implements ref.Val.Type. +func (i Int) Type() ref.Type { + return IntType +} + +// Value implements ref.Val.Value. +func (i Int) Value() any { + return int64(i) +} + +// isJSONSafe indicates whether the int is safely representable as a floating point value in JSON. +func (i Int) isJSONSafe() bool { + return i >= minIntJSON && i <= maxIntJSON +} + +const ( + // maxIntJSON is defined as the Number.MAX_SAFE_INTEGER value per EcmaScript 6. + maxIntJSON = 1<<53 - 1 + // minIntJSON is defined as the Number.MIN_SAFE_INTEGER value per EcmaScript 6. 
+ minIntJSON = -maxIntJSON +) diff --git a/vendor/github.com/google/cel-go/common/types/iterator.go b/vendor/github.com/google/cel-go/common/types/iterator.go new file mode 100644 index 00000000..98e9147b --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/iterator.go @@ -0,0 +1,55 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +var ( + // IteratorType singleton. + IteratorType = NewObjectType("iterator", traits.IteratorType) +) + +// baseIterator is the basis for list, map, and object iterators. +// +// An iterator in and of itself should not be a valid value for comparison, but must implement the +// `ref.Val` methods in order to be well-supported within instruction arguments processed by the +// interpreter. +type baseIterator struct{} + +func (*baseIterator) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion on iterators not supported") +} + +func (*baseIterator) ConvertToType(typeVal ref.Type) ref.Val { + return NewErr("no such overload") +} + +func (*baseIterator) Equal(other ref.Val) ref.Val { + return NewErr("no such overload") +} + +func (*baseIterator) Type() ref.Type { + return IteratorType +} + +func (*baseIterator) Value() any { + return nil +} diff --git a/vendor/github.com/google/cel-go/common/types/json_value.go b/vendor/github.com/google/cel-go/common/types/json_value.go new file mode 100644 index 00000000..13a4efe7 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/json_value.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// JSON type constants representing the reflected types of protobuf JSON values. 
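baseIterator above underpins the HasNext/Next protocol that the interpreter's comprehension loops drive. A usage sketch over a concrete list, assuming the vendored module resolves:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	l := types.NewStringList(types.DefaultTypeAdapter, []string{"a", "b", "c"})

    	// The HasNext/Next protocol; Next returns nil once exhausted.
    	for it := l.Iterator(); it.HasNext() == types.True; {
    		fmt.Println(it.Next().Value())
    	}
    }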
+var (
+	jsonValueType     = reflect.TypeOf(&structpb.Value{})
+	jsonListValueType = reflect.TypeOf(&structpb.ListValue{})
+	jsonStructType    = reflect.TypeOf(&structpb.Struct{})
+	jsonNullType      = reflect.TypeOf(structpb.NullValue_NULL_VALUE)
+)
diff --git a/vendor/github.com/google/cel-go/common/types/list.go b/vendor/github.com/google/cel-go/common/types/list.go
new file mode 100644
index 00000000..06f48dde
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/list.go
@@ -0,0 +1,529 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+// NewDynamicList returns a traits.Lister with heterogeneous elements.
+// value should be an array of "native" types, i.e. any type that
+// NativeToValue() can convert to a ref.Val.
+func NewDynamicList(adapter Adapter, value any) traits.Lister {
+	refValue := reflect.ValueOf(value)
+	return &baseList{
+		Adapter: adapter,
+		value:   value,
+		size:    refValue.Len(),
+		get: func(i int) any {
+			return refValue.Index(i).Interface()
+		},
+	}
+}
+
+// NewStringList returns a traits.Lister containing only strings.
+func NewStringList(adapter Adapter, elems []string) traits.Lister {
+	return &baseList{
+		Adapter: adapter,
+		value:   elems,
+		size:    len(elems),
+		get:     func(i int) any { return elems[i] },
+	}
+}
+
+// NewRefValList returns a traits.Lister with ref.Val elements.
+//
+// This type specialization is used with list literals within CEL expressions.
+func NewRefValList(adapter Adapter, elems []ref.Val) traits.Lister {
+	return &baseList{
+		Adapter: adapter,
+		value:   elems,
+		size:    len(elems),
+		get:     func(i int) any { return elems[i] },
+	}
+}
+
+// NewProtoList returns a traits.Lister based on a pb.List instance.
+func NewProtoList(adapter Adapter, list protoreflect.List) traits.Lister {
+	return &baseList{
+		Adapter: adapter,
+		value:   list,
+		size:    list.Len(),
+		get:     func(i int) any { return list.Get(i).Interface() },
+	}
+}
+
+// NewJSONList returns a traits.Lister based on a structpb.ListValue instance.
+func NewJSONList(adapter Adapter, l *structpb.ListValue) traits.Lister {
+	vals := l.GetValues()
+	return &baseList{
+		Adapter: adapter,
+		value:   l,
+		size:    len(vals),
+		get:     func(i int) any { return vals[i] },
+	}
+}
+
+// NewMutableList creates a new mutable list whose internal state can be modified.
+func NewMutableList(adapter Adapter) traits.MutableLister {
+	var mutableValues []ref.Val
+	l := &mutableList{
+		baseList: &baseList{
+			Adapter: adapter,
+			value:   mutableValues,
+			size:    0,
+		},
+		mutableValues: mutableValues,
+	}
+	l.get = func(i int) any {
+		return l.mutableValues[i]
+	}
+	return l
+}
+
+// baseList points to a list containing elements of any type.
+// The `value` is the native list representation, and `get` returns the element
+// at a given index. The `Adapter` enables native type to CEL type conversions.
+type baseList struct {
+	Adapter
+	value any
+
+	// size indicates the number of elements within the list.
+	// Since objects are immutable the size of a list is static.
+	size int
+
+	// get returns a value at the specified integer index.
+	// The index is guaranteed to be checked against the list index range.
+	get func(int) any
+}
+
+// Add implements the traits.Adder interface method.
+func (l *baseList) Add(other ref.Val) ref.Val {
+	otherList, ok := other.(traits.Lister)
+	if !ok {
+		return MaybeNoSuchOverloadErr(other)
+	}
+	if l.Size() == IntZero {
+		return other
+	}
+	if otherList.Size() == IntZero {
+		return l
+	}
+	return &concatList{
+		Adapter:  l.Adapter,
+		prevList: l,
+		nextList: otherList}
+}
+
+// Contains implements the traits.Container interface method.
+func (l *baseList) Contains(elem ref.Val) ref.Val {
+	for i := 0; i < l.size; i++ {
+		val := l.NativeToValue(l.get(i))
+		cmp := elem.Equal(val)
+		b, ok := cmp.(Bool)
+		if ok && b == True {
+			return True
+		}
+	}
+	return False
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (l *baseList) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	// If the underlying list value is assignable to the reflected type return it.
+	if reflect.TypeOf(l.value).AssignableTo(typeDesc) {
+		return l.value, nil
+	}
+	// If the list wrapper is assignable to the desired type return it.
+	if reflect.TypeOf(l).AssignableTo(typeDesc) {
+		return l, nil
+	}
+	// Attempt to convert the list to a set of well known protobuf types.
+	switch typeDesc {
+	case anyValueType:
+		json, err := l.ConvertToNative(jsonListValueType)
+		if err != nil {
+			return nil, err
+		}
+		return anypb.New(json.(proto.Message))
+	case jsonValueType, jsonListValueType:
+		jsonValues, err :=
+			l.ConvertToNative(reflect.TypeOf([]*structpb.Value{}))
+		if err != nil {
+			return nil, err
+		}
+		jsonList := &structpb.ListValue{Values: jsonValues.([]*structpb.Value)}
+		if typeDesc == jsonListValueType {
+			return jsonList, nil
+		}
+		return structpb.NewListValue(jsonList), nil
+	}
+	// Non-list conversion.
+	if typeDesc.Kind() != reflect.Slice && typeDesc.Kind() != reflect.Array {
+		return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc)
+	}
+
+	// List conversion.
+	// Allow the element ConvertToNative() function to determine whether conversion is possible.
+	otherElemType := typeDesc.Elem()
+	elemCount := l.size
+	var nativeList reflect.Value
+	if typeDesc.Kind() == reflect.Array {
+		nativeList = reflect.New(reflect.ArrayOf(elemCount, typeDesc)).Elem().Index(0)
+	} else {
+		nativeList = reflect.MakeSlice(typeDesc, elemCount, elemCount)
+	}
+	for i := 0; i < elemCount; i++ {
+		elem := l.NativeToValue(l.get(i))
+		nativeElemVal, err := elem.ConvertToNative(otherElemType)
+		if err != nil {
+			return nil, err
+		}
+		nativeList.Index(i).Set(reflect.ValueOf(nativeElemVal))
+	}
+	return nativeList.Interface(), nil
+}
+
+// ConvertToType implements the ref.Val interface method.
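ConvertToNative above delegates per-element conversion, so a CEL list reaches any slice type its elements can reach. A short sketch, assuming the vendored module resolves:

    package main

    import (
    	"fmt"
    	"reflect"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	l := types.NewDynamicList(types.DefaultTypeAdapter, []int32{1, 2, 3})

    	// Each element's own ConvertToNative decides whether the target
    	// element type is reachable; here int32 widens to int64.
    	out, err := l.ConvertToNative(reflect.TypeOf([]int64{}))
    	fmt.Println(out, err) // [1 2 3] <nil>
    }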
+func (l *baseList) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case ListType: + return l + case TypeType: + return ListType + } + return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (l *baseList) Equal(other ref.Val) ref.Val { + otherList, ok := other.(traits.Lister) + if !ok { + return False + } + if l.Size() != otherList.Size() { + return False + } + for i := IntZero; i < l.Size().(Int); i++ { + thisElem := l.Get(i) + otherElem := otherList.Get(i) + elemEq := Equal(thisElem, otherElem) + if elemEq == False { + return False + } + } + return True +} + +// Get implements the traits.Indexer interface method. +func (l *baseList) Get(index ref.Val) ref.Val { + ind, err := IndexOrError(index) + if err != nil { + return ValOrErr(index, err.Error()) + } + if ind < 0 || ind >= l.size { + return NewErr("index '%d' out of range in list size '%d'", ind, l.Size()) + } + return l.NativeToValue(l.get(ind)) +} + +// IsZeroValue returns true if the list is empty. +func (l *baseList) IsZeroValue() bool { + return l.size == 0 +} + +// Iterator implements the traits.Iterable interface method. +func (l *baseList) Iterator() traits.Iterator { + return newListIterator(l) +} + +// Size implements the traits.Sizer interface method. +func (l *baseList) Size() ref.Val { + return Int(l.size) +} + +// Type implements the ref.Val interface method. +func (l *baseList) Type() ref.Type { + return ListType +} + +// Value implements the ref.Val interface method. +func (l *baseList) Value() any { + return l.value +} + +// String converts the list to a human readable string form. +func (l *baseList) String() string { + var sb strings.Builder + sb.WriteString("[") + for i := 0; i < l.size; i++ { + sb.WriteString(fmt.Sprintf("%v", l.get(i))) + if i != l.size-1 { + sb.WriteString(", ") + } + } + sb.WriteString("]") + return sb.String() +} + +// mutableList aggregates values into its internal storage. For use with internal CEL variables only. +type mutableList struct { + *baseList + mutableValues []ref.Val +} + +// Add copies elements from the other list into the internal storage of the mutable list. +// The ref.Val returned by Add is the receiver. +func (l *mutableList) Add(other ref.Val) ref.Val { + switch otherList := other.(type) { + case *mutableList: + l.mutableValues = append(l.mutableValues, otherList.mutableValues...) + l.size += len(otherList.mutableValues) + case traits.Lister: + for i := IntZero; i < otherList.Size().(Int); i++ { + l.size++ + l.mutableValues = append(l.mutableValues, otherList.Get(i)) + } + default: + return MaybeNoSuchOverloadErr(otherList) + } + return l +} + +// ToImmutableList returns an immutable list based on the internal storage of the mutable list. +func (l *mutableList) ToImmutableList() traits.Lister { + // The reference to internal state is guaranteed to be safe as this call is only performed + // when mutations have been completed. + return NewRefValList(l.Adapter, l.mutableValues) +} + +// concatList combines two list implementations together into a view. +// The `Adapter` enables native type to CEL type conversions. +type concatList struct { + Adapter + value any + prevList traits.Lister + nextList traits.Lister +} + +// Add implements the traits.Adder interface method. 
+func (l *concatList) Add(other ref.Val) ref.Val { + otherList, ok := other.(traits.Lister) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + if l.Size() == IntZero { + return other + } + if otherList.Size() == IntZero { + return l + } + return &concatList{ + Adapter: l.Adapter, + prevList: l, + nextList: otherList} +} + +// Contains implements the traits.Container interface method. +func (l *concatList) Contains(elem ref.Val) ref.Val { + // The concat list relies on the IsErrorOrUnknown checks against the input element to be + // performed by the `prevList` and/or `nextList`. + prev := l.prevList.Contains(elem) + // Short-circuit the return if the elem was found in the prev list. + if prev == True { + return prev + } + // Return if the elem was found in the next list. + next := l.nextList.Contains(elem) + if next == True { + return next + } + // Handle the case where an error or unknown was encountered before checking next. + if IsUnknownOrError(prev) { + return prev + } + // Otherwise, rely on the next value as the representative result. + return next +} + +// ConvertToNative implements the ref.Val interface method. +func (l *concatList) ConvertToNative(typeDesc reflect.Type) (any, error) { + combined := NewDynamicList(l.Adapter, l.Value().([]any)) + return combined.ConvertToNative(typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (l *concatList) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case ListType: + return l + case TypeType: + return ListType + } + return NewErr("type conversion error from '%s' to '%s'", ListType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (l *concatList) Equal(other ref.Val) ref.Val { + otherList, ok := other.(traits.Lister) + if !ok { + return False + } + if l.Size() != otherList.Size() { + return False + } + var maybeErr ref.Val + for i := IntZero; i < l.Size().(Int); i++ { + thisElem := l.Get(i) + otherElem := otherList.Get(i) + elemEq := Equal(thisElem, otherElem) + if elemEq == False { + return False + } + if maybeErr == nil && IsUnknownOrError(elemEq) { + maybeErr = elemEq + } + } + if maybeErr != nil { + return maybeErr + } + return True +} + +// Get implements the traits.Indexer interface method. +func (l *concatList) Get(index ref.Val) ref.Val { + ind, err := IndexOrError(index) + if err != nil { + return ValOrErr(index, err.Error()) + } + i := Int(ind) + if i < l.prevList.Size().(Int) { + return l.prevList.Get(i) + } + offset := i - l.prevList.Size().(Int) + return l.nextList.Get(offset) +} + +// IsZeroValue returns true if the list is empty. +func (l *concatList) IsZeroValue() bool { + return l.Size().(Int) == 0 +} + +// Iterator implements the traits.Iterable interface method. +func (l *concatList) Iterator() traits.Iterator { + return newListIterator(l) +} + +// Size implements the traits.Sizer interface method. +func (l *concatList) Size() ref.Val { + return l.prevList.Size().(Int).Add(l.nextList.Size()) +} + +// String converts the concatenated list to a human-readable string. +func (l *concatList) String() string { + var sb strings.Builder + sb.WriteString("[") + for i := Int(0); i < l.Size().(Int); i++ { + sb.WriteString(fmt.Sprintf("%v", l.Get(i))) + if i != l.Size().(Int)-1 { + sb.WriteString(", ") + } + } + sb.WriteString("]") + return sb.String() +} + +// Type implements the ref.Val interface method. +func (l *concatList) Type() ref.Type { + return ListType +} + +// Value implements the ref.Val interface method. 
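concatList above is a lazy view: Add stitches two lists together without copying either backing store, and Value() (which follows) only materializes the merged slice on demand. A sketch, assuming the vendored module resolves:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/types"
    	"github.com/google/cel-go/common/types/traits"
    )

    func main() {
    	a := types.NewStringList(types.DefaultTypeAdapter, []string{"x"})
    	b := types.NewStringList(types.DefaultTypeAdapter, []string{"y", "z"})

    	// Add returns a view over both lists; indexing spans the seam.
    	c := a.Add(b).(traits.Lister)
    	fmt.Println(c.Size())                    // 3
    	fmt.Println(c.Get(types.Int(2)).Value()) // z
    }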
+func (l *concatList) Value() any { + if l.value == nil { + merged := make([]any, l.Size().(Int)) + prevLen := l.prevList.Size().(Int) + for i := Int(0); i < prevLen; i++ { + merged[i] = l.prevList.Get(i).Value() + } + nextLen := l.nextList.Size().(Int) + for j := Int(0); j < nextLen; j++ { + merged[prevLen+j] = l.nextList.Get(j).Value() + } + l.value = merged + } + return l.value +} + +func newListIterator(listValue traits.Lister) traits.Iterator { + return &listIterator{ + listValue: listValue, + len: listValue.Size().(Int), + } +} + +type listIterator struct { + *baseIterator + listValue traits.Lister + cursor Int + len Int +} + +// HasNext implements the traits.Iterator interface method. +func (it *listIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *listIterator) Next() ref.Val { + if it.HasNext() == True { + index := it.cursor + it.cursor++ + return it.listValue.Get(index) + } + return nil +} + +// IndexOrError converts an input index value into either a lossless integer index or an error. +func IndexOrError(index ref.Val) (int, error) { + switch iv := index.(type) { + case Int: + return int(iv), nil + case Double: + if ik, ok := doubleToInt64Lossless(float64(iv)); ok { + return int(ik), nil + } + return -1, fmt.Errorf("unsupported index value %v in list", index) + case Uint: + if ik, ok := uint64ToInt64Lossless(uint64(iv)); ok { + return int(ik), nil + } + return -1, fmt.Errorf("unsupported index value %v in list", index) + default: + return -1, fmt.Errorf("unsupported index type '%s' in list", index.Type()) + } +} diff --git a/vendor/github.com/google/cel-go/common/types/map.go b/vendor/github.com/google/cel-go/common/types/map.go new file mode 100644 index 00000000..739b7aab --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/map.go @@ -0,0 +1,854 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strings" + + "github.com/stoewer/go-strcase" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// NewDynamicMap returns a traits.Mapper value with dynamic key, value pairs. +func NewDynamicMap(adapter Adapter, value any) traits.Mapper { + refValue := reflect.ValueOf(value) + return &baseMap{ + Adapter: adapter, + mapAccessor: newReflectMapAccessor(adapter, refValue), + value: value, + size: refValue.Len(), + } +} + +// NewJSONStruct creates a traits.Mapper implementation backed by a JSON struct that has been +// encoded in protocol buffer form. +// +// The `adapter` argument provides type adaptation capabilities from proto to CEL. 
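NewDynamicMap above adapts any reflect-able Go map, and lookups return CEL error values rather than panicking on missing keys. A usage sketch, assuming the vendored module resolves:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	m := types.NewDynamicMap(types.DefaultTypeAdapter, map[string]int{"replicas": 3})

    	fmt.Println(m.Contains(types.String("replicas"))) // true
    	fmt.Println(m.Get(types.String("replicas")))      // 3

    	// Missing keys surface as CEL error values, not Go panics.
    	fmt.Println(types.IsError(m.Get(types.String("missing")))) // true
    }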
+func NewJSONStruct(adapter Adapter, value *structpb.Struct) traits.Mapper {
+	fields := value.GetFields()
+	return &baseMap{
+		Adapter:     adapter,
+		mapAccessor: newJSONStructAccessor(adapter, fields),
+		value:       value,
+		size:        len(fields),
+	}
+}
+
+// NewRefValMap returns a specialized traits.Mapper with CEL valued keys and values.
+func NewRefValMap(adapter Adapter, value map[ref.Val]ref.Val) traits.Mapper {
+	return &baseMap{
+		Adapter:     adapter,
+		mapAccessor: newRefValMapAccessor(value),
+		value:       value,
+		size:        len(value),
+	}
+}
+
+// NewStringInterfaceMap returns a specialized traits.Mapper with string keys and interface values.
+func NewStringInterfaceMap(adapter Adapter, value map[string]any) traits.Mapper {
+	return &baseMap{
+		Adapter:     adapter,
+		mapAccessor: newStringIfaceMapAccessor(adapter, value),
+		value:       value,
+		size:        len(value),
+	}
+}
+
+// NewStringStringMap returns a specialized traits.Mapper with string keys and values.
+func NewStringStringMap(adapter Adapter, value map[string]string) traits.Mapper {
+	return &baseMap{
+		Adapter:     adapter,
+		mapAccessor: newStringMapAccessor(value),
+		value:       value,
+		size:        len(value),
+	}
+}
+
+// NewProtoMap returns a specialized traits.Mapper for handling protobuf map values.
+func NewProtoMap(adapter Adapter, value *pb.Map) traits.Mapper {
+	return &protoMap{
+		Adapter: adapter,
+		value:   value,
+	}
+}
+
+// mapAccessor is a private interface for finding values within a map and iterating over the keys.
+// This interface implements portions of the API surface area required by the traits.Mapper
+// interface.
+type mapAccessor interface {
+	// Find returns a value, if one exists, for the input key.
+	//
+	// If the key is not found the function returns (nil, false).
+	Find(ref.Val) (ref.Val, bool)
+
+	// Iterator returns an Iterator over the map key set.
+	Iterator() traits.Iterator
+}
+
+// baseMap is a reflection based map implementation designed to handle a variety of map-like types.
+//
+// Since CEL is side-effect free, the base map represents an immutable object.
+type baseMap struct {
+	// TypeAdapter used to convert keys and values accessed within the map.
+	Adapter
+
+	// mapAccessor interface implementation used to find and iterate over map keys.
+	mapAccessor
+
+	// value is the native Go value upon which the map type operates.
+	value any
+
+	// size is the number of entries in the map.
+	size int
+}
+
+// Contains implements the traits.Container interface method.
+func (m *baseMap) Contains(index ref.Val) ref.Val {
+	_, found := m.Find(index)
+	return Bool(found)
+}
+
+// ConvertToNative implements the ref.Val interface method.
+func (m *baseMap) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	// If the map is already assignable to the desired type return it, e.g. interfaces and
+	// maps with the same key value types.
+ if reflect.TypeOf(m.value).AssignableTo(typeDesc) { + return m.value, nil + } + if reflect.TypeOf(m).AssignableTo(typeDesc) { + return m, nil + } + switch typeDesc { + case anyValueType: + json, err := m.ConvertToNative(jsonStructType) + if err != nil { + return nil, err + } + return anypb.New(json.(proto.Message)) + case jsonValueType, jsonStructType: + jsonEntries, err := + m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{})) + if err != nil { + return nil, err + } + jsonMap := &structpb.Struct{Fields: jsonEntries.(map[string]*structpb.Value)} + if typeDesc == jsonStructType { + return jsonMap, nil + } + return structpb.NewStructValue(jsonMap), nil + } + + // Unwrap pointers, but track their use. + isPtr := false + if typeDesc.Kind() == reflect.Ptr { + tk := typeDesc + typeDesc = typeDesc.Elem() + if typeDesc.Kind() == reflect.Ptr { + return nil, fmt.Errorf("unsupported type conversion to '%v'", tk) + } + isPtr = true + } + switch typeDesc.Kind() { + // Map conversion. + case reflect.Map: + otherKey := typeDesc.Key() + otherElem := typeDesc.Elem() + nativeMap := reflect.MakeMapWithSize(typeDesc, m.size) + it := m.Iterator() + for it.HasNext() == True { + key := it.Next() + refKeyValue, err := key.ConvertToNative(otherKey) + if err != nil { + return nil, err + } + refElemValue, err := m.Get(key).ConvertToNative(otherElem) + if err != nil { + return nil, err + } + nativeMap.SetMapIndex(reflect.ValueOf(refKeyValue), reflect.ValueOf(refElemValue)) + } + return nativeMap.Interface(), nil + case reflect.Struct: + nativeStructPtr := reflect.New(typeDesc) + nativeStruct := nativeStructPtr.Elem() + it := m.Iterator() + for it.HasNext() == True { + key := it.Next() + // Ensure the field name being referenced is exported. + // Only exported (public) field names can be set by reflection, where the name + // must be at least one character in length and start with an upper-case letter. + fieldName := key.ConvertToType(StringType) + if IsError(fieldName) { + return nil, fieldName.(*Err) + } + name := string(fieldName.(String)) + name = strcase.UpperCamelCase(name) + fieldRef := nativeStruct.FieldByName(name) + if !fieldRef.IsValid() { + return nil, fmt.Errorf("type conversion error, no such field '%s' in type '%v'", name, typeDesc) + } + fieldValue, err := m.Get(key).ConvertToNative(fieldRef.Type()) + if err != nil { + return nil, err + } + fieldRef.Set(reflect.ValueOf(fieldValue)) + } + if isPtr { + return nativeStructPtr.Interface(), nil + } + return nativeStruct.Interface(), nil + } + return nil, fmt.Errorf("type conversion error from map to '%v'", typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (m *baseMap) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case MapType: + return m + case TypeType: + return MapType + } + return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (m *baseMap) Equal(other ref.Val) ref.Val { + otherMap, ok := other.(traits.Mapper) + if !ok { + return False + } + if m.Size() != otherMap.Size() { + return False + } + it := m.Iterator() + for it.HasNext() == True { + key := it.Next() + thisVal, _ := m.Find(key) + otherVal, found := otherMap.Find(key) + if !found { + return False + } + valEq := Equal(thisVal, otherVal) + if valEq == False { + return False + } + } + return True +} + +// Get implements the traits.Indexer interface method. 
+func (m *baseMap) Get(key ref.Val) ref.Val { + v, found := m.Find(key) + if !found { + return ValOrErr(v, "no such key: %v", key) + } + return v +} + +// IsZeroValue returns true if the map is empty. +func (m *baseMap) IsZeroValue() bool { + return m.size == 0 +} + +// Size implements the traits.Sizer interface method. +func (m *baseMap) Size() ref.Val { + return Int(m.size) +} + +// String converts the map into a human-readable string. +func (m *baseMap) String() string { + var sb strings.Builder + sb.WriteString("{") + it := m.Iterator() + i := 0 + for it.HasNext() == True { + k := it.Next() + v, _ := m.Find(k) + sb.WriteString(fmt.Sprintf("%v: %v", k, v)) + if i != m.size-1 { + sb.WriteString(", ") + } + i++ + } + sb.WriteString("}") + return sb.String() +} + +// Type implements the ref.Val interface method. +func (m *baseMap) Type() ref.Type { + return MapType +} + +// Value implements the ref.Val interface method. +func (m *baseMap) Value() any { + return m.value +} + +func newJSONStructAccessor(adapter Adapter, st map[string]*structpb.Value) mapAccessor { + return &jsonStructAccessor{ + Adapter: adapter, + st: st, + } +} + +type jsonStructAccessor struct { + Adapter + st map[string]*structpb.Value +} + +// Find searches the json struct field map for the input key value and returns (value, true) if +// found. +// +// If the key is not found the function returns (nil, false). +func (a *jsonStructAccessor) Find(key ref.Val) (ref.Val, bool) { + strKey, ok := key.(String) + if !ok { + return nil, false + } + keyVal, found := a.st[string(strKey)] + if !found { + return nil, false + } + return a.NativeToValue(keyVal), true +} + +// Iterator creates a new traits.Iterator from the set of JSON struct field names. +func (a *jsonStructAccessor) Iterator() traits.Iterator { + // Copy the keys to make their order stable. + mapKeys := make([]string, len(a.st)) + i := 0 + for k := range a.st { + mapKeys[i] = k + i++ + } + return &stringKeyIterator{ + mapKeys: mapKeys, + len: len(mapKeys), + } +} + +func newReflectMapAccessor(adapter Adapter, value reflect.Value) mapAccessor { + keyType := value.Type().Key() + return &reflectMapAccessor{ + Adapter: adapter, + refValue: value, + keyType: keyType, + } +} + +type reflectMapAccessor struct { + Adapter + refValue reflect.Value + keyType reflect.Type +} + +// Find converts the input key to a native Golang type and then uses reflection to find the key, +// returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). +func (m *reflectMapAccessor) Find(key ref.Val) (ref.Val, bool) { + if m.refValue.Len() == 0 { + return nil, false + } + if keyVal, found := m.findInternal(key); found { + return keyVal, true + } + switch k := key.(type) { + // Double is not a valid proto map key type, so check for the key as an int or uint. + case Double: + if ik, ok := doubleToInt64Lossless(float64(k)); ok { + if keyVal, found := m.findInternal(Int(ik)); found { + return keyVal, true + } + } + if uk, ok := doubleToUint64Lossless(float64(k)); ok { + return m.findInternal(Uint(uk)) + } + // map keys of type double are not supported. + case Int: + if uk, ok := int64ToUint64Lossless(int64(k)); ok { + return m.findInternal(Uint(uk)) + } + case Uint: + if ik, ok := uint64ToInt64Lossless(uint64(k)); ok { + return m.findInternal(Int(ik)) + } + } + return nil, false +} + +// findInternal attempts to convert the incoming key to the map's internal native type +// and then returns the value, if found. 
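The accessor Find methods above coerce numeric keys losslessly across int, uint, and whole-valued double before giving up, so a lookup succeeds regardless of which numeric CEL type carried the key. A sketch, assuming the vendored module resolves:

    package main

    import (
    	"fmt"

    	"github.com/google/cel-go/common/types"
    )

    func main() {
    	m := types.NewDynamicMap(types.DefaultTypeAdapter, map[int64]string{1: "one"})

    	// uint and whole-valued double keys coerce to the native int64 key.
    	fmt.Println(m.Get(types.Uint(1)))     // one
    	fmt.Println(m.Get(types.Double(1.0))) // one

    	// A fractional key has no lossless integer form.
    	fmt.Println(types.IsError(m.Get(types.Double(1.5)))) // true
    }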
+func (m *reflectMapAccessor) findInternal(key ref.Val) (ref.Val, bool) { + k, err := key.ConvertToNative(m.keyType) + if err != nil { + return nil, false + } + refKey := reflect.ValueOf(k) + val := m.refValue.MapIndex(refKey) + if val.IsValid() { + return m.NativeToValue(val.Interface()), true + } + return nil, false +} + +// Iterator creates a Golang reflection based traits.Iterator. +func (m *reflectMapAccessor) Iterator() traits.Iterator { + return &mapIterator{ + Adapter: m.Adapter, + mapKeys: m.refValue.MapRange(), + len: m.refValue.Len(), + } +} + +func newRefValMapAccessor(mapVal map[ref.Val]ref.Val) mapAccessor { + return &refValMapAccessor{mapVal: mapVal} +} + +type refValMapAccessor struct { + mapVal map[ref.Val]ref.Val +} + +// Find uses native map accesses to find the key, returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). +func (a *refValMapAccessor) Find(key ref.Val) (ref.Val, bool) { + if len(a.mapVal) == 0 { + return nil, false + } + if keyVal, found := a.mapVal[key]; found { + return keyVal, true + } + switch k := key.(type) { + case Double: + if ik, ok := doubleToInt64Lossless(float64(k)); ok { + if keyVal, found := a.mapVal[Int(ik)]; found { + return keyVal, found + } + } + if uk, ok := doubleToUint64Lossless(float64(k)); ok { + keyVal, found := a.mapVal[Uint(uk)] + return keyVal, found + } + // map keys of type double are not supported. + case Int: + if uk, ok := int64ToUint64Lossless(int64(k)); ok { + keyVal, found := a.mapVal[Uint(uk)] + return keyVal, found + } + case Uint: + if ik, ok := uint64ToInt64Lossless(uint64(k)); ok { + keyVal, found := a.mapVal[Int(ik)] + return keyVal, found + } + } + return nil, false +} + +// Iterator produces a new traits.Iterator which iterates over the map keys via Golang reflection. +func (a *refValMapAccessor) Iterator() traits.Iterator { + return &mapIterator{ + Adapter: DefaultTypeAdapter, + mapKeys: reflect.ValueOf(a.mapVal).MapRange(), + len: len(a.mapVal), + } +} + +func newStringMapAccessor(strMap map[string]string) mapAccessor { + return &stringMapAccessor{mapVal: strMap} +} + +type stringMapAccessor struct { + mapVal map[string]string +} + +// Find uses native map accesses to find the key, returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). +func (a *stringMapAccessor) Find(key ref.Val) (ref.Val, bool) { + strKey, ok := key.(String) + if !ok { + return nil, false + } + keyVal, found := a.mapVal[string(strKey)] + if !found { + return nil, false + } + return String(keyVal), true +} + +// Iterator creates a new traits.Iterator from the string key set of the map. +func (a *stringMapAccessor) Iterator() traits.Iterator { + // Copy the keys to make their order stable. + mapKeys := make([]string, len(a.mapVal)) + i := 0 + for k := range a.mapVal { + mapKeys[i] = k + i++ + } + return &stringKeyIterator{ + mapKeys: mapKeys, + len: len(mapKeys), + } +} + +func newStringIfaceMapAccessor(adapter Adapter, mapVal map[string]any) mapAccessor { + return &stringIfaceMapAccessor{ + Adapter: adapter, + mapVal: mapVal, + } +} + +type stringIfaceMapAccessor struct { + Adapter + mapVal map[string]any +} + +// Find uses native map accesses to find the key, returning (value, true) if present. +// +// If the key is not found the function returns (nil, false). 
+func (a *stringIfaceMapAccessor) Find(key ref.Val) (ref.Val, bool) { + strKey, ok := key.(String) + if !ok { + return nil, false + } + keyVal, found := a.mapVal[string(strKey)] + if !found { + return nil, false + } + return a.NativeToValue(keyVal), true +} + +// Iterator creates a new traits.Iterator from the string key set of the map. +func (a *stringIfaceMapAccessor) Iterator() traits.Iterator { + // Copy the keys to make their order stable. + mapKeys := make([]string, len(a.mapVal)) + i := 0 + for k := range a.mapVal { + mapKeys[i] = k + i++ + } + return &stringKeyIterator{ + mapKeys: mapKeys, + len: len(mapKeys), + } +} + +// protoMap is a specialized, separate implementation of the traits.Mapper interfaces tailored to +// accessing protoreflect.Map values. +type protoMap struct { + Adapter + value *pb.Map +} + +// Contains returns whether the map contains the given key. +func (m *protoMap) Contains(key ref.Val) ref.Val { + _, found := m.Find(key) + return Bool(found) +} + +// ConvertToNative implements the ref.Val interface method. +// +// Note, assignment to Golang struct types is not yet supported. +func (m *protoMap) ConvertToNative(typeDesc reflect.Type) (any, error) { + // If the map is already assignable to the desired type return it, e.g. interfaces and + // maps with the same key value types. + switch typeDesc { + case anyValueType: + json, err := m.ConvertToNative(jsonStructType) + if err != nil { + return nil, err + } + return anypb.New(json.(proto.Message)) + case jsonValueType, jsonStructType: + jsonEntries, err := + m.ConvertToNative(reflect.TypeOf(map[string]*structpb.Value{})) + if err != nil { + return nil, err + } + jsonMap := &structpb.Struct{ + Fields: jsonEntries.(map[string]*structpb.Value)} + if typeDesc == jsonStructType { + return jsonMap, nil + } + return structpb.NewStructValue(jsonMap), nil + } + switch typeDesc.Kind() { + case reflect.Struct, reflect.Ptr: + if reflect.TypeOf(m.value).AssignableTo(typeDesc) { + return m.value, nil + } + if reflect.TypeOf(m).AssignableTo(typeDesc) { + return m, nil + } + } + if typeDesc.Kind() != reflect.Map { + return nil, fmt.Errorf("unsupported type conversion: %v to map", typeDesc) + } + + keyType := m.value.KeyType.ReflectType() + valType := m.value.ValueType.ReflectType() + otherKeyType := typeDesc.Key() + otherValType := typeDesc.Elem() + mapVal := reflect.MakeMapWithSize(typeDesc, m.value.Len()) + var err error + m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool { + ntvKey := key.Interface() + ntvVal := val.Interface() + switch pv := ntvVal.(type) { + case protoreflect.Message: + ntvVal = pv.Interface() + } + if keyType == otherKeyType && valType == otherValType { + mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal)) + return true + } + celKey := m.NativeToValue(ntvKey) + celVal := m.NativeToValue(ntvVal) + ntvKey, err = celKey.ConvertToNative(otherKeyType) + if err != nil { + // early terminate the range loop. + return false + } + ntvVal, err = celVal.ConvertToNative(otherValType) + if err != nil { + // early terminate the range loop. + return false + } + mapVal.SetMapIndex(reflect.ValueOf(ntvKey), reflect.ValueOf(ntvVal)) + return true + }) + if err != nil { + return nil, err + } + return mapVal.Interface(), nil +} + +// ConvertToType implements the ref.Val interface method. 
+func (m *protoMap) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case MapType: + return m + case TypeType: + return MapType + } + return NewErr("type conversion error from '%s' to '%s'", MapType, typeVal) +} + +// Equal implements the ref.Val interface method. +func (m *protoMap) Equal(other ref.Val) ref.Val { + otherMap, ok := other.(traits.Mapper) + if !ok { + return False + } + if m.value.Map.Len() != int(otherMap.Size().(Int)) { + return False + } + var retVal ref.Val = True + m.value.Range(func(key protoreflect.MapKey, val protoreflect.Value) bool { + keyVal := m.NativeToValue(key.Interface()) + valVal := m.NativeToValue(val) + otherVal, found := otherMap.Find(keyVal) + if !found { + retVal = False + return false + } + valEq := Equal(valVal, otherVal) + if valEq != True { + retVal = valEq + return false + } + return true + }) + return retVal +} + +// Find returns whether the protoreflect.Map contains the input key. +// +// If the key is not found the function returns (nil, false). +func (m *protoMap) Find(key ref.Val) (ref.Val, bool) { + if keyVal, found := m.findInternal(key); found { + return keyVal, true + } + switch k := key.(type) { + // Double is not a valid proto map key type, so check for the key as an int or uint. + case Double: + if ik, ok := doubleToInt64Lossless(float64(k)); ok { + if keyVal, found := m.findInternal(Int(ik)); found { + return keyVal, true + } + } + if uk, ok := doubleToUint64Lossless(float64(k)); ok { + return m.findInternal(Uint(uk)) + } + // map keys of type double are not supported. + case Int: + if uk, ok := int64ToUint64Lossless(int64(k)); ok { + return m.findInternal(Uint(uk)) + } + case Uint: + if ik, ok := uint64ToInt64Lossless(uint64(k)); ok { + return m.findInternal(Int(ik)) + } + } + return nil, false +} + +// findInternal attempts to convert the incoming key to the map's internal native type +// and then returns the value, if found. +func (m *protoMap) findInternal(key ref.Val) (ref.Val, bool) { + // Convert the input key to the expected protobuf key type. + ntvKey, err := key.ConvertToNative(m.value.KeyType.ReflectType()) + if err != nil { + return nil, false + } + // Use protoreflection to get the key value. + val := m.value.Get(protoreflect.ValueOf(ntvKey).MapKey()) + if !val.IsValid() { + return nil, false + } + // Perform nominal type unwrapping from the input value. + switch v := val.Interface().(type) { + case protoreflect.List, protoreflect.Map: + // Maps do not support list or map values + return nil, false + default: + return m.NativeToValue(v), true + } +} + +// Get implements the traits.Indexer interface method. +func (m *protoMap) Get(key ref.Val) ref.Val { + v, found := m.Find(key) + if !found { + return ValOrErr(v, "no such key: %v", key) + } + return v +} + +// IsZeroValue returns true if the map is empty. +func (m *protoMap) IsZeroValue() bool { + return m.value.Len() == 0 +} + +// Iterator implements the traits.Iterable interface method. +func (m *protoMap) Iterator() traits.Iterator { + // Copy the keys to make their order stable. + mapKeys := make([]protoreflect.MapKey, 0, m.value.Len()) + m.value.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + mapKeys = append(mapKeys, k) + return true + }) + return &protoMapIterator{ + Adapter: m.Adapter, + mapKeys: mapKeys, + len: m.value.Len(), + } +} + +// Size returns the number of entries in the protoreflect.Map. +func (m *protoMap) Size() ref.Val { + return Int(m.value.Len()) +} + +// Type implements the ref.Val interface method. 
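[Editorial note: the `Find` implementations above share a numeric key-coercion rule: a lookup key of CEL type `double` can still address an entry stored under an `int` or `uint` key, provided the conversion is lossless. The sketch below mirrors that guard in plain Go; `losslessInt` is a hypothetical stand-in for the package's unexported `doubleToInt64Lossless` helper, not part of this patch.]

```go
package main

import (
	"fmt"
	"math"
)

// losslessInt is a hypothetical stand-in for doubleToInt64Lossless: the
// conversion succeeds only when the range check passes and the round-trip
// back to float64 preserves the original value.
func losslessInt(v float64) (int64, bool) {
	if math.IsInf(v, 0) || math.IsNaN(v) ||
		v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
		return 0, false
	}
	i := int64(v)
	if float64(i) != v {
		return 0, false
	}
	return i, true
}

func main() {
	entries := map[int64]string{2: "two"}
	for _, key := range []float64{2.0, 2.5} {
		if ik, ok := losslessInt(key); ok {
			fmt.Println(key, "->", entries[ik]) // 2 -> two
		} else {
			fmt.Println(key, "-> no lossless int key") // 2.5 has no int equivalent
		}
	}
}
```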
+func (m *protoMap) Type() ref.Type { + return MapType +} + +// Value implements the ref.Val interface method. +func (m *protoMap) Value() any { + return m.value +} + +type mapIterator struct { + *baseIterator + Adapter + mapKeys *reflect.MapIter + cursor int + len int +} + +// HasNext implements the traits.Iterator interface method. +func (it *mapIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *mapIterator) Next() ref.Val { + if it.HasNext() == True && it.mapKeys.Next() { + it.cursor++ + refKey := it.mapKeys.Key() + return it.NativeToValue(refKey.Interface()) + } + return nil +} + +type protoMapIterator struct { + *baseIterator + Adapter + mapKeys []protoreflect.MapKey + cursor int + len int +} + +// HasNext implements the traits.Iterator interface method. +func (it *protoMapIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *protoMapIterator) Next() ref.Val { + if it.HasNext() == True { + index := it.cursor + it.cursor++ + refKey := it.mapKeys[index] + return it.NativeToValue(refKey.Interface()) + } + return nil +} + +type stringKeyIterator struct { + *baseIterator + mapKeys []string + cursor int + len int +} + +// HasNext implements the traits.Iterator interface method. +func (it *stringKeyIterator) HasNext() ref.Val { + return Bool(it.cursor < it.len) +} + +// Next implements the traits.Iterator interface method. +func (it *stringKeyIterator) Next() ref.Val { + if it.HasNext() == True { + index := it.cursor + it.cursor++ + return String(it.mapKeys[index]) + } + return nil +} diff --git a/vendor/github.com/google/cel-go/common/types/null.go b/vendor/github.com/google/cel-go/common/types/null.go new file mode 100644 index 00000000..926ca3dc --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/null.go @@ -0,0 +1,111 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +// Null type implementation. +type Null structpb.NullValue + +var ( + // NullValue singleton. + NullValue = Null(structpb.NullValue_NULL_VALUE) + + // golang reflect type for Null values. + nullReflectType = reflect.TypeOf(NullValue) +) + +// ConvertToNative implements ref.Val.ConvertToNative. +func (n Null) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Int32: + switch typeDesc { + case jsonNullType: + return structpb.NullValue_NULL_VALUE, nil + case nullReflectType: + return n, nil + } + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Convert to a JSON-null before packing to an Any field since the enum value for JSON + // null cannot be packed directly. 
+ pb, err := n.ConvertToNative(jsonValueType) + if err != nil { + return nil, err + } + return anypb.New(pb.(proto.Message)) + case jsonValueType: + return structpb.NewNullValue(), nil + case boolWrapperType, byteWrapperType, doubleWrapperType, floatWrapperType, + int32WrapperType, int64WrapperType, stringWrapperType, uint32WrapperType, + uint64WrapperType: + return nil, nil + } + case reflect.Interface: + nv := n.Value() + if reflect.TypeOf(nv).Implements(typeDesc) { + return nv, nil + } + if reflect.TypeOf(n).Implements(typeDesc) { + return n, nil + } + } + // If the type conversion isn't supported return an error. + return nil, fmt.Errorf("type conversion error from '%v' to '%v'", NullType, typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (n Null) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case StringType: + return String("null") + case NullType: + return n + case TypeType: + return NullType + } + return NewErr("type conversion error from '%s' to '%s'", NullType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (n Null) Equal(other ref.Val) ref.Val { + return Bool(NullType == other.Type()) +} + +// IsZeroValue returns true as null always represents an absent value. +func (n Null) IsZeroValue() bool { + return true +} + +// Type implements ref.Val.Type. +func (n Null) Type() ref.Type { + return NullType +} + +// Value implements ref.Val.Value. +func (n Null) Value() any { + return structpb.NullValue_NULL_VALUE +} diff --git a/vendor/github.com/google/cel-go/common/types/object.go b/vendor/github.com/google/cel-go/common/types/object.go new file mode 100644 index 00000000..8ba0af9f --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/object.go @@ -0,0 +1,165 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +type protoObj struct { + Adapter + value proto.Message + typeDesc *pb.TypeDescription + typeValue ref.Val +} + +// NewObject returns an object based on a proto.Message value which handles +// conversion between protobuf type values and expression type values. +// Objects support indexing and iteration. +// +// Note: the type value is pulled from the list of registered types within the +// type provider. If the proto type is not registered within the type provider, +// then this will result in an error within the type adapter / provider. 
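[Editorial note: a minimal usage sketch for the `Null` type defined above, assuming the vendored package is importable at `github.com/google/cel-go/common/types`. It shows the conversions and equality semantics implemented by `ConvertToType`, `IsZeroValue`, and `Equal`.]

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	n := types.NullValue

	// Null converts to its string form and always reports itself as a zero value.
	fmt.Println(n.ConvertToType(types.StringType)) // null
	fmt.Println(n.IsZeroValue())                   // true

	// Null is only ever equal to another null.
	fmt.Println(n.Equal(types.NullValue))      // true
	fmt.Println(n.Equal(types.String("null"))) // false
}
```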
+func NewObject(adapter Adapter, + typeDesc *pb.TypeDescription, + typeValue ref.Val, + value proto.Message) ref.Val { + return &protoObj{ + Adapter: adapter, + value: value, + typeDesc: typeDesc, + typeValue: typeValue} +} + +func (o *protoObj) ConvertToNative(typeDesc reflect.Type) (any, error) { + srcPB := o.value + if reflect.TypeOf(srcPB).AssignableTo(typeDesc) { + return srcPB, nil + } + if reflect.TypeOf(o).AssignableTo(typeDesc) { + return o, nil + } + switch typeDesc { + case anyValueType: + _, isAny := srcPB.(*anypb.Any) + if isAny { + return srcPB, nil + } + return anypb.New(srcPB) + case jsonValueType: + // Marshal the proto to JSON first, and then rehydrate as protobuf.Value as there is no + // support for direct conversion from proto.Message to protobuf.Value. + bytes, err := protojson.Marshal(srcPB) + if err != nil { + return nil, err + } + json := &structpb.Value{} + err = protojson.Unmarshal(bytes, json) + if err != nil { + return nil, err + } + return json, nil + default: + if typeDesc == o.typeDesc.ReflectType() { + return o.value, nil + } + if typeDesc.Kind() == reflect.Ptr { + val := reflect.New(typeDesc.Elem()).Interface() + dstPB, ok := val.(proto.Message) + if ok { + err := pb.Merge(dstPB, srcPB) + if err != nil { + return nil, fmt.Errorf("type conversion error: %v", err) + } + return dstPB, nil + } + } + } + return nil, fmt.Errorf("type conversion error from '%T' to '%v'", o.value, typeDesc) +} + +func (o *protoObj) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + default: + if o.Type().TypeName() == typeVal.TypeName() { + return o + } + case TypeType: + return o.typeValue + } + return NewErr("type conversion error from '%s' to '%s'", o.typeDesc.Name(), typeVal) +} + +func (o *protoObj) Equal(other ref.Val) ref.Val { + otherPB, ok := other.Value().(proto.Message) + return Bool(ok && pb.Equal(o.value, otherPB)) +} + +// IsSet tests whether a field which is defined is set to a non-default value. +func (o *protoObj) IsSet(field ref.Val) ref.Val { + protoFieldName, ok := field.(String) + if !ok { + return MaybeNoSuchOverloadErr(field) + } + protoFieldStr := string(protoFieldName) + fd, found := o.typeDesc.FieldByName(protoFieldStr) + if !found { + return NewErr("no such field '%s'", field) + } + if fd.IsSet(o.value) { + return True + } + return False +} + +// IsZeroValue returns true if the protobuf object is empty. +func (o *protoObj) IsZeroValue() bool { + return proto.Equal(o.value, o.typeDesc.Zero()) +} + +func (o *protoObj) Get(index ref.Val) ref.Val { + protoFieldName, ok := index.(String) + if !ok { + return MaybeNoSuchOverloadErr(index) + } + protoFieldStr := string(protoFieldName) + fd, found := o.typeDesc.FieldByName(protoFieldStr) + if !found { + return NewErr("no such field '%s'", index) + } + fv, err := fd.GetFrom(o.value) + if err != nil { + return NewErr(err.Error()) + } + return o.NativeToValue(fv) +} + +func (o *protoObj) Type() ref.Type { + return o.typeValue.(ref.Type) +} + +func (o *protoObj) Value() any { + return o.value +} diff --git a/vendor/github.com/google/cel-go/common/types/optional.go b/vendor/github.com/google/cel-go/common/types/optional.go new file mode 100644 index 00000000..97845a74 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/optional.go @@ -0,0 +1,108 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "reflect" + + "github.com/google/cel-go/common/types/ref" +) + +var ( + // OptionalType indicates the runtime type of an optional value. + OptionalType = NewOpaqueType("optional_type") + + // OptionalNone is a sentinel value which is used to indicate an empty optional value. + OptionalNone = &Optional{} +) + +// OptionalOf returns an optional value which wraps a concrete CEL value. +func OptionalOf(value ref.Val) *Optional { + return &Optional{value: value} +} + +// Optional value which points to a value if non-empty. +type Optional struct { + value ref.Val +} + +// HasValue returns true if the optional has a value. +func (o *Optional) HasValue() bool { + return o.value != nil +} + +// GetValue returns the wrapped value contained in the optional. +func (o *Optional) GetValue() ref.Val { + if !o.HasValue() { + return NewErr("optional.none() dereference") + } + return o.value +} + +// ConvertToNative implements the ref.Val interface method. +func (o *Optional) ConvertToNative(typeDesc reflect.Type) (any, error) { + if !o.HasValue() { + return nil, errors.New("optional.none() dereference") + } + return o.value.ConvertToNative(typeDesc) +} + +// ConvertToType implements the ref.Val interface method. +func (o *Optional) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case OptionalType: + return o + case TypeType: + return OptionalType + } + return NewErr("type conversion error from '%s' to '%s'", OptionalType, typeVal) +} + +// Equal determines whether the values contained by two optional values are equal. +func (o *Optional) Equal(other ref.Val) ref.Val { + otherOpt, isOpt := other.(*Optional) + if !isOpt { + return False + } + if !o.HasValue() { + return Bool(!otherOpt.HasValue()) + } + if !otherOpt.HasValue() { + return False + } + return o.value.Equal(otherOpt.value) +} + +func (o *Optional) String() string { + if o.HasValue() { + return fmt.Sprintf("optional(%v)", o.GetValue()) + } + return "optional.none()" +} + +// Type implements the ref.Val interface method. +func (o *Optional) Type() ref.Type { + return OptionalType +} + +// Value returns the underlying 'Value()' of the wrapped value, if present. +func (o *Optional) Value() any { + if o.value == nil { + return nil + } + return o.value.Value() +} diff --git a/vendor/github.com/google/cel-go/common/types/overflow.go b/vendor/github.com/google/cel-go/common/types/overflow.go new file mode 100644 index 00000000..dcb66ef5 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/overflow.go @@ -0,0 +1,429 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
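[Editorial note: a short sketch of the `Optional` value introduced above, under the same import-path assumption as the previous example. `OptionalOf` wraps a value, while the shared `OptionalNone` sentinel represents absence; dereferencing the sentinel yields an error value rather than panicking.]

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/common/types"
)

func main() {
	opt := types.OptionalOf(types.String("value"))
	fmt.Println(opt.HasValue()) // true
	fmt.Println(opt.GetValue()) // value
	fmt.Println(opt)            // optional(value)

	// OptionalNone is the empty sentinel; GetValue on it produces an
	// "optional.none() dereference" error value.
	fmt.Println(types.OptionalNone.HasValue()) // false
	fmt.Println(types.OptionalNone)            // optional.none()
}
```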
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"math"
+	"time"
+)
+
+var (
+	doubleTwoTo64 = math.Ldexp(1.0, 64)
+)
+
+// addInt64Checked performs addition with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func addInt64Checked(x, y int64) (int64, error) {
+	if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) {
+		return 0, errIntOverflow
+	}
+	return x + y, nil
+}
+
+// subtractInt64Checked performs subtraction with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func subtractInt64Checked(x, y int64) (int64, error) {
+	if (y < 0 && x > math.MaxInt64+y) || (y > 0 && x < math.MinInt64+y) {
+		return 0, errIntOverflow
+	}
+	return x - y, nil
+}
+
+// negateInt64Checked performs negation with overflow detection of an int64.
+//
+// If the operation fails the error return value will be non-nil.
+func negateInt64Checked(x int64) (int64, error) {
+	// In twos complement, negating MinInt64 would result in a value of MaxInt64+1.
+	if x == math.MinInt64 {
+		return 0, errIntOverflow
+	}
+	return -x, nil
+}
+
+// multiplyInt64Checked performs multiplication with overflow detection of two int64 values.
+//
+// If the operation fails the error return value will be non-nil.
+func multiplyInt64Checked(x, y int64) (int64, error) {
+	// Detecting multiplication overflow is more complicated than the others. The first two detect
+	// attempting to negate MinInt64, which would result in MaxInt64+1. The other four detect normal
+	// overflow conditions.
+	if (x == -1 && y == math.MinInt64) || (y == -1 && x == math.MinInt64) ||
+		// x is positive, y is positive
+		(x > 0 && y > 0 && x > math.MaxInt64/y) ||
+		// x is positive, y is negative
+		(x > 0 && y < 0 && y < math.MinInt64/x) ||
+		// x is negative, y is positive
+		(x < 0 && y > 0 && x < math.MinInt64/y) ||
+		// x is negative, y is negative
+		(x < 0 && y < 0 && y < math.MaxInt64/x) {
+		return 0, errIntOverflow
+	}
+	return x * y, nil
+}
+
+// divideInt64Checked performs division with overflow detection of two int64 values,
+// as well as a division by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func divideInt64Checked(x, y int64) (int64, error) {
+	// Division by zero.
+	if y == 0 {
+		return 0, errDivideByZero
+	}
+	// In twos complement, negating MinInt64 would result in a value of MaxInt64+1.
+	if x == math.MinInt64 && y == -1 {
+		return 0, errIntOverflow
+	}
+	return x / y, nil
+}
+
+// moduloInt64Checked performs modulo with overflow detection of two int64 values
+// as well as a modulus by zero check.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloInt64Checked(x, y int64) (int64, error) {
+	// Modulus by zero.
+	if y == 0 {
+		return 0, errModulusByZero
+	}
+	// In twos complement, negating MinInt64 would result in a value of MaxInt64+1.
+	if x == math.MinInt64 && y == -1 {
+		return 0, errIntOverflow
+	}
+	return x % y, nil
+}
+
+// addUint64Checked performs addition with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addUint64Checked(x, y uint64) (uint64, error) {
+	if y > 0 && x > math.MaxUint64-y {
+		return 0, errUintOverflow
+	}
+	return x + y, nil
+}
+
+// subtractUint64Checked performs subtraction with overflow detection of two uint64 values.
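[Editorial note: the checked helpers above all follow the same pattern: test the boundary condition before performing the operation, so the result is never computed from an already-wrapped value. A standalone sketch of the addition guard; `checkedAdd` and `errIntOverflow` here are local illustrations, not exports of the vendored package.]

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

var errIntOverflow = errors.New("integer overflow")

// checkedAdd mirrors the guard used by addInt64Checked: overflow is ruled
// out by comparing against the representable range before adding.
func checkedAdd(x, y int64) (int64, error) {
	if (y > 0 && x > math.MaxInt64-y) || (y < 0 && x < math.MinInt64-y) {
		return 0, errIntOverflow
	}
	return x + y, nil
}

func main() {
	if v, err := checkedAdd(math.MaxInt64-1, 1); err == nil {
		fmt.Println(v) // 9223372036854775807
	}
	if _, err := checkedAdd(math.MaxInt64, 1); err != nil {
		fmt.Println(err) // integer overflow
	}
}
```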
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractUint64Checked(x, y uint64) (uint64, error) {
+	if y > x {
+		return 0, errUintOverflow
+	}
+	return x - y, nil
+}
+
+// multiplyUint64Checked performs multiplication with overflow detection of two uint64 values.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func multiplyUint64Checked(x, y uint64) (uint64, error) {
+	if y != 0 && x > math.MaxUint64/y {
+		return 0, errUintOverflow
+	}
+	return x * y, nil
+}
+
+// divideUint64Checked performs division with a test for division by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func divideUint64Checked(x, y uint64) (uint64, error) {
+	if y == 0 {
+		return 0, errDivideByZero
+	}
+	return x / y, nil
+}
+
+// moduloUint64Checked performs modulo with a test for modulus by zero.
+//
+// If the operation fails the error return value will be non-nil.
+func moduloUint64Checked(x, y uint64) (uint64, error) {
+	if y == 0 {
+		return 0, errModulusByZero
+	}
+	return x % y, nil
+}
+
+// addDurationChecked performs addition with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addDurationChecked(x, y time.Duration) (time.Duration, error) {
+	val, err := addInt64Checked(int64(x), int64(y))
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(val), nil
+}
+
+// subtractDurationChecked performs subtraction with overflow detection of two time.Durations.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractDurationChecked(x, y time.Duration) (time.Duration, error) {
+	val, err := subtractInt64Checked(int64(x), int64(y))
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(val), nil
+}
+
+// negateDurationChecked performs negation with overflow detection of a time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func negateDurationChecked(x time.Duration) (time.Duration, error) {
+	val, err := negateInt64Checked(int64(x))
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(val), nil
+}
+
+// addTimeDurationChecked performs addition with overflow detection of a time.Time and time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func addTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+	// This is tricky. A time is represented as (int64, int32) where the first is seconds and second
+	// is nanoseconds. A duration is int64 representing nanoseconds. We cannot normalize time to int64
+	// as it could potentially overflow. The only way to proceed is to break time and duration into
+	// second and nanosecond components.
+
+	// First we break time into its components by truncating and subtracting.
+	sec1 := x.Truncate(time.Second).Unix()                // Truncate to seconds.
+	nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+	// Second we break duration into its components by dividing and modulo.
+	sec2 := int64(y) / int64(time.Second)  // Truncate to seconds.
+	nsec2 := int64(y) % int64(time.Second) // Get remainder.
+
+	// Add seconds first, detecting any overflow.
+	sec, err := addInt64Checked(sec1, sec2)
+	if err != nil {
+		return time.Time{}, err
+	}
+	// Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+	nsec := nsec1 + nsec2
+
+	// We need to normalize nanoseconds to be positive and carry extra nanoseconds to seconds.
+	// Adapted from time.Unix(int64, int64).
+	if nsec < 0 || nsec >= int64(time.Second) {
+		// Add seconds.
+		sec, err = addInt64Checked(sec, nsec/int64(time.Second))
+		if err != nil {
+			return time.Time{}, err
+		}
+
+		nsec -= (nsec / int64(time.Second)) * int64(time.Second)
+		if nsec < 0 {
+			// Subtract an extra second
+			sec, err = addInt64Checked(sec, -1)
+			if err != nil {
+				return time.Time{}, err
+			}
+			nsec += int64(time.Second)
+		}
+	}
+
+	// Check if the number of seconds from Unix epoch is within our acceptable range.
+	if sec < minUnixTime || sec > maxUnixTime {
+		return time.Time{}, errTimestampOverflow
+	}
+
+	// Return resulting time and propagate time zone.
+	return time.Unix(sec, nsec).In(x.Location()), nil
+}
+
+// subtractTimeChecked performs subtraction with overflow detection of two time.Time.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeChecked(x, y time.Time) (time.Duration, error) {
+	// Similar to addTimeDurationChecked() above.
+
+	// First we break time into its components by truncating and subtracting.
+	sec1 := x.Truncate(time.Second).Unix()                // Truncate to seconds.
+	nsec1 := x.Sub(x.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+	// Second we break duration into its components by truncating and subtracting.
+	sec2 := y.Truncate(time.Second).Unix()                // Truncate to seconds.
+	nsec2 := y.Sub(y.Truncate(time.Second)).Nanoseconds() // Get nanoseconds by truncating and subtracting.
+
+	// Subtract seconds first, detecting any overflow.
+	sec, err := subtractInt64Checked(sec1, sec2)
+	if err != nil {
+		return time.Duration(0), err
+	}
+
+	// Nanoseconds cannot overflow as time.Time normalizes them to [0, 999999999].
+	nsec := nsec1 - nsec2
+
+	// Scale seconds to nanoseconds detecting overflow.
+	tsec, err := multiplyInt64Checked(sec, int64(time.Second))
+	if err != nil {
+		return time.Duration(0), err
+	}
+
+	// Lastly we need to add the two nanoseconds together.
+	val, err := addInt64Checked(tsec, nsec)
+	if err != nil {
+		return time.Duration(0), err
+	}
+
+	return time.Duration(val), nil
+}
+
+// subtractTimeDurationChecked performs subtraction with overflow detection of a time.Time and
+// time.Duration.
+//
+// If the operation fails due to overflow the error return value will be non-nil.
+func subtractTimeDurationChecked(x time.Time, y time.Duration) (time.Time, error) {
+	// The easiest way to implement this is to negate y and add them.
+	// x - y = x + -y
+	val, err := negateDurationChecked(y)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return addTimeDurationChecked(x, val)
+}
+
+// doubleToInt64Checked converts a double to an int64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToInt64Checked(v float64) (int64, error) {
+	if math.IsInf(v, 0) || math.IsNaN(v) || v <= float64(math.MinInt64) || v >= float64(math.MaxInt64) {
+		return 0, errIntOverflow
+	}
+	return int64(v), nil
+}
+
+// doubleToUint64Checked converts a double to a uint64 value.
+//
+// If the conversion fails due to overflow the error return value will be non-nil.
+func doubleToUint64Checked(v float64) (uint64, error) {
+	if math.IsInf(v, 0) || math.IsNaN(v) || v < 0 || v >= doubleTwoTo64 {
+		return 0, errUintOverflow
+	}
+	return uint64(v), nil
+}
+
+// int64ToUint64Checked converts an int64 to a uint64 value.
+// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToUint64Checked(v int64) (uint64, error) { + if v < 0 { + return 0, errUintOverflow + } + return uint64(v), nil +} + +// int64ToInt8Checked converts an int64 to an int8 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt8Checked(v int64) (int8, error) { + if v < math.MinInt8 || v > math.MaxInt8 { + return 0, errIntOverflow + } + return int8(v), nil +} + +// int64ToInt16Checked converts an int64 to an int16 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt16Checked(v int64) (int16, error) { + if v < math.MinInt16 || v > math.MaxInt16 { + return 0, errIntOverflow + } + return int16(v), nil +} + +// int64ToInt32Checked converts an int64 to an int32 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func int64ToInt32Checked(v int64) (int32, error) { + if v < math.MinInt32 || v > math.MaxInt32 { + return 0, errIntOverflow + } + return int32(v), nil +} + +// uint64ToUint8Checked converts a uint64 to a uint8 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint8Checked(v uint64) (uint8, error) { + if v > math.MaxUint8 { + return 0, errUintOverflow + } + return uint8(v), nil +} + +// uint64ToUint16Checked converts a uint64 to a uint16 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint16Checked(v uint64) (uint16, error) { + if v > math.MaxUint16 { + return 0, errUintOverflow + } + return uint16(v), nil +} + +// uint64ToUint32Checked converts a uint64 to a uint32 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. +func uint64ToUint32Checked(v uint64) (uint32, error) { + if v > math.MaxUint32 { + return 0, errUintOverflow + } + return uint32(v), nil +} + +// uint64ToInt64Checked converts a uint64 to an int64 value. +// +// If the conversion fails due to overflow the error return value will be non-nil. 
+func uint64ToInt64Checked(v uint64) (int64, error) { + if v > math.MaxInt64 { + return 0, errIntOverflow + } + return int64(v), nil +} + +func doubleToUint64Lossless(v float64) (uint64, bool) { + u, err := doubleToUint64Checked(v) + if err != nil { + return 0, false + } + if float64(u) != v { + return 0, false + } + return u, true +} + +func doubleToInt64Lossless(v float64) (int64, bool) { + i, err := doubleToInt64Checked(v) + if err != nil { + return 0, false + } + if float64(i) != v { + return 0, false + } + return i, true +} + +func int64ToUint64Lossless(v int64) (uint64, bool) { + u, err := int64ToUint64Checked(v) + return u, err == nil +} + +func uint64ToInt64Lossless(v uint64) (int64, bool) { + i, err := uint64ToInt64Checked(v) + return i, err == nil +} diff --git a/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel new file mode 100644 index 00000000..e2b9d37b --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/pb/BUILD.bazel @@ -0,0 +1,53 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "checked.go", + "enum.go", + "equal.go", + "file.go", + "pb.go", + "type.go", + ], + importpath = "github.com/google/cel-go/common/types/pb", + deps = [ + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//encoding/protowire:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//reflect/protoregistry:go_default_library", + "@org_golang_google_protobuf//types/dynamicpb:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/emptypb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "equal_test.go", + "file_test.go", + "pb_test.go", + "type_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//checker/decls:go_default_library", + "//test/proto2pb:test_all_types_go_proto", + "//test/proto3pb:test_all_types_go_proto", + "@org_golang_google_protobuf//reflect/protodesc:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + "@org_golang_google_protobuf//types/descriptorpb:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/types/pb/checked.go b/vendor/github.com/google/cel-go/common/types/pb/checked.go new file mode 100644 index 00000000..312a6a07 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/pb/checked.go @@ -0,0 +1,93 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
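[Editorial note: the `*Lossless` helpers above succeed only when a conversion is exactly reversible; for the signed/unsigned pair the sole failure mode is crossing the sign boundary. The sketch below mirrors that logic with local illustrative functions (`toUintLossless`, `toIntLossless` are not exports of the vendored package).]

```go
package main

import (
	"fmt"
	"math"
)

// toUintLossless mirrors int64ToUint64Lossless: negative values cannot be
// represented as uint64.
func toUintLossless(v int64) (uint64, bool) {
	if v < 0 {
		return 0, false
	}
	return uint64(v), true
}

// toIntLossless mirrors uint64ToInt64Lossless: values above MaxInt64 cannot
// be represented as int64.
func toIntLossless(v uint64) (int64, bool) {
	if v > math.MaxInt64 {
		return 0, false
	}
	return int64(v), true
}

func main() {
	fmt.Println(toUintLossless(42))                       // 42 true
	fmt.Println(toUintLossless(-1))                       // 0 false
	fmt.Println(toIntLossless(uint64(math.MaxInt64) + 1)) // 0 false
}
```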
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pb + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + + exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +var ( + // CheckedPrimitives map from proto field descriptor type to expr.Type. + CheckedPrimitives = map[protoreflect.Kind]*exprpb.Type{ + protoreflect.BoolKind: checkedBool, + protoreflect.BytesKind: checkedBytes, + protoreflect.DoubleKind: checkedDouble, + protoreflect.FloatKind: checkedDouble, + protoreflect.Int32Kind: checkedInt, + protoreflect.Int64Kind: checkedInt, + protoreflect.Sint32Kind: checkedInt, + protoreflect.Sint64Kind: checkedInt, + protoreflect.Uint32Kind: checkedUint, + protoreflect.Uint64Kind: checkedUint, + protoreflect.Fixed32Kind: checkedUint, + protoreflect.Fixed64Kind: checkedUint, + protoreflect.Sfixed32Kind: checkedInt, + protoreflect.Sfixed64Kind: checkedInt, + protoreflect.StringKind: checkedString} + + // CheckedWellKnowns map from qualified proto type name to expr.Type for + // well-known proto types. + CheckedWellKnowns = map[string]*exprpb.Type{ + // Wrapper types. + "google.protobuf.BoolValue": checkedWrap(checkedBool), + "google.protobuf.BytesValue": checkedWrap(checkedBytes), + "google.protobuf.DoubleValue": checkedWrap(checkedDouble), + "google.protobuf.FloatValue": checkedWrap(checkedDouble), + "google.protobuf.Int64Value": checkedWrap(checkedInt), + "google.protobuf.Int32Value": checkedWrap(checkedInt), + "google.protobuf.UInt64Value": checkedWrap(checkedUint), + "google.protobuf.UInt32Value": checkedWrap(checkedUint), + "google.protobuf.StringValue": checkedWrap(checkedString), + // Well-known types. + "google.protobuf.Any": checkedAny, + "google.protobuf.Duration": checkedDuration, + "google.protobuf.Timestamp": checkedTimestamp, + // Json types. + "google.protobuf.ListValue": checkedListDyn, + "google.protobuf.NullValue": checkedNull, + "google.protobuf.Struct": checkedMapStringDyn, + "google.protobuf.Value": checkedDyn, + } + + // common types + checkedDyn = &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}} + // Wrapper and primitive types. + checkedBool = checkedPrimitive(exprpb.Type_BOOL) + checkedBytes = checkedPrimitive(exprpb.Type_BYTES) + checkedDouble = checkedPrimitive(exprpb.Type_DOUBLE) + checkedInt = checkedPrimitive(exprpb.Type_INT64) + checkedString = checkedPrimitive(exprpb.Type_STRING) + checkedUint = checkedPrimitive(exprpb.Type_UINT64) + // Well-known type equivalents. + checkedAny = checkedWellKnown(exprpb.Type_ANY) + checkedDuration = checkedWellKnown(exprpb.Type_DURATION) + checkedTimestamp = checkedWellKnown(exprpb.Type_TIMESTAMP) + // Json-based type equivalents. 
+ checkedNull = &exprpb.Type{ + TypeKind: &exprpb.Type_Null{ + Null: structpb.NullValue_NULL_VALUE}} + checkedListDyn = &exprpb.Type{ + TypeKind: &exprpb.Type_ListType_{ + ListType: &exprpb.Type_ListType{ElemType: checkedDyn}}} + checkedMapStringDyn = &exprpb.Type{ + TypeKind: &exprpb.Type_MapType_{ + MapType: &exprpb.Type_MapType{ + KeyType: checkedString, + ValueType: checkedDyn}}} +) diff --git a/vendor/github.com/google/cel-go/common/types/pb/enum.go b/vendor/github.com/google/cel-go/common/types/pb/enum.go new file mode 100644 index 00000000..09a15463 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/pb/enum.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pb + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// newEnumValueDescription produces an enum value description with the fully qualified enum value +// name and the enum value descriptor. +func newEnumValueDescription(name string, desc protoreflect.EnumValueDescriptor) *EnumValueDescription { + return &EnumValueDescription{ + enumValueName: name, + desc: desc, + } +} + +// EnumValueDescription maps a fully-qualified enum value name to its numeric value. +type EnumValueDescription struct { + enumValueName string + desc protoreflect.EnumValueDescriptor +} + +// Name returns the fully-qualified identifier name for the enum value. +func (ed *EnumValueDescription) Name() string { + return ed.enumValueName +} + +// Value returns the (numeric) value of the enum. +func (ed *EnumValueDescription) Value() int32 { + return int32(ed.desc.Number()) +} diff --git a/vendor/github.com/google/cel-go/common/types/pb/equal.go b/vendor/github.com/google/cel-go/common/types/pb/equal.go new file mode 100644 index 00000000..76893d85 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/pb/equal.go @@ -0,0 +1,206 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package pb
+
+import (
+	"bytes"
+	"reflect"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	anypb "google.golang.org/protobuf/types/known/anypb"
+)
+
+// Equal returns whether two proto.Message instances are equal using the following criteria:
+//
+// - Messages must share the same instance of the type descriptor
+// - Known set fields are compared using semantic equality
+// - Bytes are compared using bytes.Equal
+// - Scalar values are compared with operator ==
+// - List and map types are equal if they have the same length and all elements are equal
+// - Messages are equal if they share the same descriptor and all set fields are equal
+// - Unknown fields are compared using byte equality
+// - NaN values are not equal to each other
+// - google.protobuf.Any values are unpacked before comparison
+// - If the type descriptor for a protobuf.Any cannot be found, byte equality is used rather than
+//   semantic equality.
+//
+// This method of proto equality mirrors the behavior of the C++ protobuf MessageDifferencer
+// whereas the golang proto.Equal implementation mirrors the Java protobuf equals() method's
+// behavior, which needed to treat NaN values as equal due to Java semantics.
+func Equal(x, y proto.Message) bool {
+	if x == nil || y == nil {
+		return x == nil && y == nil
+	}
+	xRef := x.ProtoReflect()
+	yRef := y.ProtoReflect()
+	return equalMessage(xRef, yRef)
+}
+
+func equalMessage(mx, my protoreflect.Message) bool {
+	// Note, the original proto.Equal upon which this implementation is based does not specifically handle the
+	// case when both messages are invalid. It is assumed that the descriptors will be equal and that byte-wise
+	// comparison will be used, though the semantics of validity are neither clear, nor promised within the
+	// proto.Equal implementation.
+	if mx.IsValid() != my.IsValid() || mx.Descriptor() != my.Descriptor() {
+		return false
+	}
+
+	// This is an innovation on the default proto.Equal where protobuf.Any values are unpacked before comparison
+	// as otherwise the Any values are compared by bytes rather than structurally.
+	if isAny(mx) && isAny(my) {
+		ax := mx.Interface().(*anypb.Any)
+		ay := my.Interface().(*anypb.Any)
+		// If the values are not the same type url, return false.
+		if ax.GetTypeUrl() != ay.GetTypeUrl() {
+			return false
+		}
+		// If the values are byte equal, then return true.
+		if bytes.Equal(ax.GetValue(), ay.GetValue()) {
+			return true
+		}
+		// Otherwise fall through to the semantic comparison of the any values.
+		x, err := ax.UnmarshalNew()
+		if err != nil {
+			return false
+		}
+		y, err := ay.UnmarshalNew()
+		if err != nil {
+			return false
+		}
+		// Recursively compare the unwrapped messages to ensure nested Any values are unwrapped accordingly.
+		return equalMessage(x.ProtoReflect(), y.ProtoReflect())
+	}
+
+	// Walk the set fields to determine field-wise equality
+	nx := 0
+	equal := true
+	mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool {
+		nx++
+		equal = my.Has(fd) && equalField(fd, vx, my.Get(fd))
+		return equal
+	})
+	if !equal {
+		return false
+	}
+	// Establish the count of set fields on message y
+	ny := 0
+	my.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
+		ny++
+		return true
+	})
+	// If the number of set fields is not equal return false.
+ if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { + switch { + case fd.IsMap(): + return equalMap(fd, x.Map(), y.Map()) + case fd.IsList(): + return equalList(fd, x.List(), y.List()) + default: + return equalValue(fd, x, y) + } +} + +func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) + return equal + }) + return equal +} + +func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(fd, x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { + switch fd.Kind() { + case protoreflect.BoolKind: + return x.Bool() == y.Bool() + case protoreflect.EnumKind: + return x.Enum() == y.Enum() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + return x.Int() == y.Int() + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + return x.Uint() == y.Uint() + case protoreflect.FloatKind, protoreflect.DoubleKind: + return x.Float() == y.Float() + case protoreflect.StringKind: + return x.String() == y.String() + case protoreflect.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case protoreflect.MessageKind, protoreflect.GroupKind: + return equalMessage(x.Message(), y.Message()) + default: + return x.Interface() == y.Interface() + } +} + +func equalUnknown(x, y protoreflect.RawFields) bool { + lenX := len(x) + lenY := len(y) + if lenX != lenY { + return false + } + if lenX == 0 { + return true + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} + +func isAny(m protoreflect.Message) bool { + return string(m.Descriptor().FullName()) == "google.protobuf.Any" +} diff --git a/vendor/github.com/google/cel-go/common/types/pb/file.go b/vendor/github.com/google/cel-go/common/types/pb/file.go new file mode 100644 index 00000000..e323afb1 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/pb/file.go @@ -0,0 +1,202 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
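[Editorial note: one observable consequence of the comparison rules documented above is the NaN behavior: this package's `Equal` treats NaN values as unequal (C++ MessageDifferencer semantics), while the standard `proto.Equal` treats them as equal (Java semantics). A sketch, assuming the vendored import path `github.com/google/cel-go/common/types/pb`:]

```go
package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"

	celpb "github.com/google/cel-go/common/types/pb"
)

func main() {
	x := wrapperspb.Double(math.NaN())
	y := wrapperspb.Double(math.NaN())

	// The standard library mirrors Java semantics: NaN compares equal to NaN.
	fmt.Println(proto.Equal(x, y)) // true

	// This package mirrors the C++ MessageDifferencer: NaN != NaN.
	fmt.Println(celpb.Equal(x, y)) // false
}
```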
+ +package pb + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + + dynamicpb "google.golang.org/protobuf/types/dynamicpb" +) + +// newFileDescription returns a FileDescription instance with a complete listing of all the message +// types and enum values, as well as a map of extensions declared within any scope in the file. +func newFileDescription(fileDesc protoreflect.FileDescriptor, pbdb *Db) (*FileDescription, extensionMap) { + metadata := collectFileMetadata(fileDesc) + enums := make(map[string]*EnumValueDescription) + for name, enumVal := range metadata.enumValues { + enums[name] = newEnumValueDescription(name, enumVal) + } + types := make(map[string]*TypeDescription) + for name, msgType := range metadata.msgTypes { + types[name] = newTypeDescription(name, msgType, pbdb.extensions) + } + fileExtMap := make(extensionMap) + for typeName, extensions := range metadata.msgExtensionMap { + messageExtMap, found := fileExtMap[typeName] + if !found { + messageExtMap = make(map[string]*FieldDescription) + } + for _, ext := range extensions { + extDesc := dynamicpb.NewExtensionType(ext).TypeDescriptor() + messageExtMap[string(ext.FullName())] = newFieldDescription(extDesc) + } + fileExtMap[typeName] = messageExtMap + } + return &FileDescription{ + name: fileDesc.Path(), + types: types, + enums: enums, + }, fileExtMap +} + +// FileDescription holds a map of all types and enum values declared within a proto file. +type FileDescription struct { + name string + types map[string]*TypeDescription + enums map[string]*EnumValueDescription +} + +// Copy creates a copy of the FileDescription with updated Db references within its types. +func (fd *FileDescription) Copy(pbdb *Db) *FileDescription { + typesCopy := make(map[string]*TypeDescription, len(fd.types)) + for k, v := range fd.types { + typesCopy[k] = v.Copy(pbdb) + } + return &FileDescription{ + name: fd.name, + types: typesCopy, + enums: fd.enums, + } +} + +// GetName returns the fully qualified file path for the file. +func (fd *FileDescription) GetName() string { + return fd.name +} + +// GetEnumDescription returns an EnumDescription for a qualified enum value +// name declared within the .proto file. +func (fd *FileDescription) GetEnumDescription(enumName string) (*EnumValueDescription, bool) { + ed, found := fd.enums[sanitizeProtoName(enumName)] + return ed, found +} + +// GetEnumNames returns the string names of all enum values in the file. +func (fd *FileDescription) GetEnumNames() []string { + enumNames := make([]string, len(fd.enums)) + i := 0 + for _, e := range fd.enums { + enumNames[i] = e.Name() + i++ + } + return enumNames +} + +// GetTypeDescription returns a TypeDescription for a qualified protobuf message type name +// declared within the .proto file. +func (fd *FileDescription) GetTypeDescription(typeName string) (*TypeDescription, bool) { + td, found := fd.types[sanitizeProtoName(typeName)] + return td, found +} + +// GetTypeNames returns the list of all type names contained within the file. +func (fd *FileDescription) GetTypeNames() []string { + typeNames := make([]string, len(fd.types)) + i := 0 + for _, t := range fd.types { + typeNames[i] = t.Name() + i++ + } + return typeNames +} + +// sanitizeProtoName strips the leading '.' from the proto message name. +func sanitizeProtoName(name string) string { + if name != "" && name[0] == '.' { + return name[1:] + } + return name +} + +// fileMetadata is a flattened view of message types and enum values within a file descriptor. 
+type fileMetadata struct { + // msgTypes maps from fully-qualified message name to descriptor. + msgTypes map[string]protoreflect.MessageDescriptor + // enumValues maps from fully-qualified enum value to enum value descriptor. + enumValues map[string]protoreflect.EnumValueDescriptor + // msgExtensionMap maps from the protobuf message name being extended to a set of extensions + // for the type. + msgExtensionMap map[string][]protoreflect.ExtensionDescriptor + + // TODO: support enum type definitions for use in future type-check enhancements. +} + +// collectFileMetadata traverses the proto file object graph to collect message types and enum +// values and index them by their fully qualified names. +func collectFileMetadata(fileDesc protoreflect.FileDescriptor) *fileMetadata { + msgTypes := make(map[string]protoreflect.MessageDescriptor) + enumValues := make(map[string]protoreflect.EnumValueDescriptor) + msgExtensionMap := make(map[string][]protoreflect.ExtensionDescriptor) + collectMsgTypes(fileDesc.Messages(), msgTypes, enumValues, msgExtensionMap) + collectEnumValues(fileDesc.Enums(), enumValues) + collectExtensions(fileDesc.Extensions(), msgExtensionMap) + return &fileMetadata{ + msgTypes: msgTypes, + enumValues: enumValues, + msgExtensionMap: msgExtensionMap, + } +} + +// collectMsgTypes recursively collects messages, nested messages, and nested enums into a map of +// fully qualified protobuf names to descriptors. +func collectMsgTypes(msgTypes protoreflect.MessageDescriptors, + msgTypeMap map[string]protoreflect.MessageDescriptor, + enumValueMap map[string]protoreflect.EnumValueDescriptor, + msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) { + for i := 0; i < msgTypes.Len(); i++ { + msgType := msgTypes.Get(i) + msgTypeMap[string(msgType.FullName())] = msgType + nestedMsgTypes := msgType.Messages() + if nestedMsgTypes.Len() != 0 { + collectMsgTypes(nestedMsgTypes, msgTypeMap, enumValueMap, msgExtensionMap) + } + nestedEnumTypes := msgType.Enums() + if nestedEnumTypes.Len() != 0 { + collectEnumValues(nestedEnumTypes, enumValueMap) + } + nestedExtensions := msgType.Extensions() + if nestedExtensions.Len() != 0 { + collectExtensions(nestedExtensions, msgExtensionMap) + } + } +} + +// collectEnumValues accumulates the enum values within an enum declaration. 
+func collectEnumValues(enumTypes protoreflect.EnumDescriptors, enumValueMap map[string]protoreflect.EnumValueDescriptor) {
+	for i := 0; i < enumTypes.Len(); i++ {
+		enumType := enumTypes.Get(i)
+		enumTypeValues := enumType.Values()
+		for j := 0; j < enumTypeValues.Len(); j++ {
+			enumValue := enumTypeValues.Get(j)
+			enumValueName := fmt.Sprintf("%s.%s", string(enumType.FullName()), string(enumValue.Name()))
+			enumValueMap[enumValueName] = enumValue
+		}
+	}
+}
+
+func collectExtensions(extensions protoreflect.ExtensionDescriptors, msgExtensionMap map[string][]protoreflect.ExtensionDescriptor) {
+	for i := 0; i < extensions.Len(); i++ {
+		ext := extensions.Get(i)
+		extendsMsg := string(ext.ContainingMessage().FullName())
+		msgExts, found := msgExtensionMap[extendsMsg]
+		if !found {
+			msgExts = []protoreflect.ExtensionDescriptor{}
+		}
+		msgExts = append(msgExts, ext)
+		msgExtensionMap[extendsMsg] = msgExts
+	}
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/pb.go b/vendor/github.com/google/cel-go/common/types/pb/pb.go
new file mode 100644
index 00000000..eadebcb0
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/pb.go
@@ -0,0 +1,258 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pb reflects over protocol buffer descriptors to generate objects
+// that simplify type, enum, and field lookup.
+package pb
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	durpb "google.golang.org/protobuf/types/known/durationpb"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	tspb "google.golang.org/protobuf/types/known/timestamppb"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// Db maps from file / message / enum name to file description.
+//
+// Each Db is isolated from the others, and while information about protobuf descriptors may be
+// fetched from the global protobuf registry, no descriptors are added to this registry, or else
+// the isolation guarantees of the Db object would be violated.
+type Db struct {
+	revFileDescriptorMap map[string]*FileDescription
+	// files contains the deduped set of FileDescriptions whose types are contained in the pb.Db.
+	files []*FileDescription
+	// extensions contains the mapping between a given type name, extension name and its FieldDescription
+	extensions map[string]map[string]*FieldDescription
+}
+
+// extensionMap is a type alias to a map[typeName]map[extensionName]*FieldDescription
+type extensionMap = map[string]map[string]*FieldDescription
+
+var (
+	// DefaultDb used at evaluation time unless overridden at check time.
+	DefaultDb = &Db{
+		revFileDescriptorMap: make(map[string]*FileDescription),
+		files:                []*FileDescription{},
+		extensions:           make(extensionMap),
+	}
+)
+
+// Merge will copy the source proto message into the destination, or error if the merge cannot be completed.
+//
+// Unlike proto.Merge, this method will fall back to proto.Marshal/Unmarshal if the two proto messages do not
+// share the same instance of their type descriptor.
+func Merge(dstPB, srcPB proto.Message) error {
+	src, dst := srcPB.ProtoReflect(), dstPB.ProtoReflect()
+	if src.Descriptor() == dst.Descriptor() {
+		proto.Merge(dstPB, srcPB)
+		return nil
+	}
+	if src.Descriptor().FullName() != dst.Descriptor().FullName() {
+		return fmt.Errorf("pb.Merge() arguments must be the same type. got: %v, %v",
+			dst.Descriptor().FullName(), src.Descriptor().FullName())
+	}
+	bytes, err := proto.Marshal(srcPB)
+	if err != nil {
+		return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to marshal source proto: %v", err)
+	}
+	err = proto.Unmarshal(bytes, dstPB)
+	if err != nil {
+		return fmt.Errorf("pb.Merge(dstPB, srcPB) failed to unmarshal to dest proto: %v", err)
+	}
+	return nil
+}
+
+// NewDb creates a new `pb.Db` with an empty type name to file description map.
+func NewDb() *Db {
+	pbdb := &Db{
+		revFileDescriptorMap: make(map[string]*FileDescription),
+		files:                []*FileDescription{},
+		extensions:           make(extensionMap),
+	}
+	// The FileDescription objects in the default db contain lazily initialized TypeDescription
+	// values which may point to the state contained in the DefaultDb irrespective of this shallow
+	// copy; however, the type graph for a field is idempotently computed, and is guaranteed to
+	// only be initialized once thanks to atomic values within the TypeDescription objects, so it
+	// is safe to share these values across instances.
+	for k, v := range DefaultDb.revFileDescriptorMap {
+		pbdb.revFileDescriptorMap[k] = v
+	}
+	pbdb.files = append(pbdb.files, DefaultDb.files...)
+	return pbdb
+}
+
+// Copy creates a copy of the current database with its own internal descriptor mapping.
+func (pbdb *Db) Copy() *Db {
+	copy := NewDb()
+	for _, fd := range pbdb.files {
+		hasFile := false
+		for _, fd2 := range copy.files {
+			if fd2 == fd {
+				hasFile = true
+			}
+		}
+		if !hasFile {
+			fd = fd.Copy(copy)
+			copy.files = append(copy.files, fd)
+		}
+		for _, enumValName := range fd.GetEnumNames() {
+			copy.revFileDescriptorMap[enumValName] = fd
+		}
+		for _, msgTypeName := range fd.GetTypeNames() {
+			copy.revFileDescriptorMap[msgTypeName] = fd
+		}
+		copy.revFileDescriptorMap[fd.GetName()] = fd
+	}
+	for typeName, extFieldMap := range pbdb.extensions {
+		copyExtFieldMap, found := copy.extensions[typeName]
+		if !found {
+			copyExtFieldMap = make(map[string]*FieldDescription, len(extFieldMap))
+		}
+		for extFieldName, fd := range extFieldMap {
+			copyExtFieldMap[extFieldName] = fd
+		}
+		copy.extensions[typeName] = copyExtFieldMap
+	}
+	return copy
+}
+
+// FileDescriptions returns the set of file descriptions associated with this db.
+func (pbdb *Db) FileDescriptions() []*FileDescription {
+	return pbdb.files
+}
+
+// RegisterDescriptor produces a `FileDescription` from a `FileDescriptor` and registers the
+// message and enum types into the `pb.Db`.
+func (pbdb *Db) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) (*FileDescription, error) { + fd, found := pbdb.revFileDescriptorMap[fileDesc.Path()] + if found { + return fd, nil + } + // Make sure to search the global registry to see if a protoreflect.FileDescriptor for + // the file specified has been linked into the binary. If so, use the copy of the descriptor + // from the global cache. + // + // Note: Proto reflection relies on descriptor values being object equal rather than object + // equivalence. This choice means that a FieldDescriptor generated from a FileDescriptorProto + // will be incompatible with the FieldDescriptor in the global registry and any message created + // from that global registry. + globalFD, err := protoregistry.GlobalFiles.FindFileByPath(fileDesc.Path()) + if err == nil { + fileDesc = globalFD + } + var fileExtMap extensionMap + fd, fileExtMap = newFileDescription(fileDesc, pbdb) + for _, enumValName := range fd.GetEnumNames() { + pbdb.revFileDescriptorMap[enumValName] = fd + } + for _, msgTypeName := range fd.GetTypeNames() { + pbdb.revFileDescriptorMap[msgTypeName] = fd + } + pbdb.revFileDescriptorMap[fd.GetName()] = fd + + // Return the specific file descriptor registered. + pbdb.files = append(pbdb.files, fd) + + // Index the protobuf message extensions from the file into the pbdb + for typeName, extMap := range fileExtMap { + typeExtMap, found := pbdb.extensions[typeName] + if !found { + pbdb.extensions[typeName] = extMap + continue + } + for extName, field := range extMap { + typeExtMap[extName] = field + } + } + return fd, nil +} + +// RegisterMessage produces a `FileDescription` from a `message` and registers the message and all +// other definitions within the message file into the `pb.Db`. +func (pbdb *Db) RegisterMessage(message proto.Message) (*FileDescription, error) { + msgDesc := message.ProtoReflect().Descriptor() + msgName := msgDesc.FullName() + typeName := sanitizeProtoName(string(msgName)) + if fd, found := pbdb.revFileDescriptorMap[typeName]; found { + return fd, nil + } + return pbdb.RegisterDescriptor(msgDesc.ParentFile()) +} + +// DescribeEnum takes a qualified enum name and returns an `EnumDescription` if it exists in the +// `pb.Db`. +func (pbdb *Db) DescribeEnum(enumName string) (*EnumValueDescription, bool) { + enumName = sanitizeProtoName(enumName) + if fd, found := pbdb.revFileDescriptorMap[enumName]; found { + return fd.GetEnumDescription(enumName) + } + return nil, false +} + +// DescribeType returns a `TypeDescription` for the `typeName` if it exists in the `pb.Db`. +func (pbdb *Db) DescribeType(typeName string) (*TypeDescription, bool) { + typeName = sanitizeProtoName(typeName) + if fd, found := pbdb.revFileDescriptorMap[typeName]; found { + return fd.GetTypeDescription(typeName) + } + return nil, false +} + +// CollectFileDescriptorSet builds a file descriptor set associated with the file where the input +// message is declared. 
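+//
+// Editor's sketch (illustrative, not part of upstream cel-go): the loop below
+// walks the import graph breadth-first, so the returned map also carries the
+// transitive dependencies of the message's file:
+//
+//	fds := CollectFileDescriptorSet(&anypb.Any{})
+//	_, ok := fds["google/protobuf/any.proto"] // ok == true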
+func CollectFileDescriptorSet(message proto.Message) map[string]protoreflect.FileDescriptor {
+	fdMap := map[string]protoreflect.FileDescriptor{}
+	parentFile := message.ProtoReflect().Descriptor().ParentFile()
+	fdMap[parentFile.Path()] = parentFile
+	// Initialize list of dependencies
+	deps := make([]protoreflect.FileImport, parentFile.Imports().Len())
+	for i := 0; i < parentFile.Imports().Len(); i++ {
+		deps[i] = parentFile.Imports().Get(i)
+	}
+	// Expand list for new dependencies
+	for i := 0; i < len(deps); i++ {
+		dep := deps[i]
+		if _, found := fdMap[dep.Path()]; found {
+			continue
+		}
+		fdMap[dep.Path()] = dep.FileDescriptor
+		for j := 0; j < dep.FileDescriptor.Imports().Len(); j++ {
+			deps = append(deps, dep.FileDescriptor.Imports().Get(j))
+		}
+	}
+	return fdMap
+}
+
+func init() {
+	// Describe well-known types to ensure they can always be resolved by the check and interpret
+	// execution phases.
+	//
+	// The following subset of message types is enough to ensure that all well-known types can be
+	// resolved in the runtime, since describing the value results in describing the whole file
+	// where the message is declared.
+	DefaultDb.RegisterMessage(&anypb.Any{})
+	DefaultDb.RegisterMessage(&durpb.Duration{})
+	DefaultDb.RegisterMessage(&emptypb.Empty{})
+	DefaultDb.RegisterMessage(&tspb.Timestamp{})
+	DefaultDb.RegisterMessage(&structpb.Value{})
+	DefaultDb.RegisterMessage(&wrapperspb.BoolValue{})
+}
diff --git a/vendor/github.com/google/cel-go/common/types/pb/type.go b/vendor/github.com/google/cel-go/common/types/pb/type.go
new file mode 100644
index 00000000..bdd474c9
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/pb/type.go
@@ -0,0 +1,614 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pb
+
+import (
+	"fmt"
+	"reflect"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+	dynamicpb "google.golang.org/protobuf/types/dynamicpb"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	dpb "google.golang.org/protobuf/types/known/durationpb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	tpb "google.golang.org/protobuf/types/known/timestamppb"
+	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// description is a private interface used to make it convenient to perform type unwrapping at
+// the TypeDescription or FieldDescription level.
+type description interface {
+	// Zero returns an empty immutable protobuf message when the description is a protobuf message
+	// type.
+	Zero() proto.Message
+}
+
+// newTypeDescription produces a TypeDescription value for the fully-qualified proto type name
+// with a given descriptor.
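+//
+// Editor's sketch (illustrative; desc and extMap stand in for real inputs):
+// the result caches a dynamic message type plus a field-name index:
+//
+//	td := newTypeDescription("google.protobuf.Timestamp", desc, extMap)
+//	_, found := td.FieldByName("seconds") // found == true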
+func newTypeDescription(typeName string, desc protoreflect.MessageDescriptor, extensions extensionMap) *TypeDescription { + msgType := dynamicpb.NewMessageType(desc) + msgZero := dynamicpb.NewMessage(desc) + fieldMap := map[string]*FieldDescription{} + fields := desc.Fields() + for i := 0; i < fields.Len(); i++ { + f := fields.Get(i) + fieldMap[string(f.Name())] = newFieldDescription(f) + } + return &TypeDescription{ + typeName: typeName, + desc: desc, + msgType: msgType, + fieldMap: fieldMap, + extensions: extensions, + reflectType: reflectTypeOf(msgZero), + zeroMsg: zeroValueOf(msgZero), + } +} + +// TypeDescription is a collection of type metadata relevant to expression +// checking and evaluation. +type TypeDescription struct { + typeName string + desc protoreflect.MessageDescriptor + msgType protoreflect.MessageType + fieldMap map[string]*FieldDescription + extensions extensionMap + reflectType reflect.Type + zeroMsg proto.Message +} + +// Copy copies the type description with updated references to the Db. +func (td *TypeDescription) Copy(pbdb *Db) *TypeDescription { + return &TypeDescription{ + typeName: td.typeName, + desc: td.desc, + msgType: td.msgType, + fieldMap: td.fieldMap, + extensions: pbdb.extensions, + reflectType: td.reflectType, + zeroMsg: td.zeroMsg, + } +} + +// FieldMap returns a string field name to FieldDescription map. +func (td *TypeDescription) FieldMap() map[string]*FieldDescription { + return td.fieldMap +} + +// FieldByName returns (FieldDescription, true) if the field name is declared within the type. +func (td *TypeDescription) FieldByName(name string) (*FieldDescription, bool) { + fd, found := td.fieldMap[name] + if found { + return fd, true + } + extFieldMap, found := td.extensions[td.typeName] + if !found { + return nil, false + } + fd, found = extFieldMap[name] + return fd, found +} + +// MaybeUnwrap accepts a proto message as input and unwraps it to a primitive CEL type if possible. +// +// This method returns the unwrapped value and 'true', else the original value and 'false'. +func (td *TypeDescription) MaybeUnwrap(msg proto.Message) (any, bool, error) { + return unwrap(td, msg) +} + +// Name returns the fully-qualified name of the type. +func (td *TypeDescription) Name() string { + return string(td.desc.FullName()) +} + +// New returns a mutable proto message +func (td *TypeDescription) New() protoreflect.Message { + return td.msgType.New() +} + +// ReflectType returns the Golang reflect.Type for this type. +func (td *TypeDescription) ReflectType() reflect.Type { + return td.reflectType +} + +// Zero returns the zero proto.Message value for this type. +func (td *TypeDescription) Zero() proto.Message { + return td.zeroMsg +} + +// newFieldDescription creates a new field description from a protoreflect.FieldDescriptor. 
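+//
+// Editor's note (illustrative, not part of upstream cel-go): for map fields
+// the description carries KeyType/ValueType sub-descriptions, and for
+// repeated fields the Go reflect type is a slice of the element type, e.g.
+// given an assumed map<string, int64> field descriptor mapDesc:
+//
+//	fd := newFieldDescription(mapDesc)
+//	_ = fd.IsMap()  // true
+//	_ = fd.KeyType  // *FieldDescription for the map entry's "key" field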
+func newFieldDescription(fieldDesc protoreflect.FieldDescriptor) *FieldDescription { + var reflectType reflect.Type + var zeroMsg proto.Message + switch fieldDesc.Kind() { + case protoreflect.EnumKind: + reflectType = reflectTypeOf(protoreflect.EnumNumber(0)) + case protoreflect.GroupKind, protoreflect.MessageKind: + zeroMsg = dynamicpb.NewMessage(fieldDesc.Message()) + reflectType = reflectTypeOf(zeroMsg) + default: + reflectType = reflectTypeOf(fieldDesc.Default().Interface()) + if fieldDesc.IsList() { + var elemValue protoreflect.Value + if fieldDesc.IsExtension() { + et := dynamicpb.NewExtensionType(fieldDesc) + elemValue = et.New().List().NewElement() + } else { + parentMsgType := fieldDesc.ContainingMessage() + parentMsg := dynamicpb.NewMessage(parentMsgType) + listField := parentMsg.NewField(fieldDesc).List() + elemValue = listField.NewElement() + } + elem := elemValue.Interface() + switch elemType := elem.(type) { + case protoreflect.Message: + elem = elemType.Interface() + } + reflectType = reflectTypeOf(elem) + } + } + // Ensure the list type is appropriately reflected as a Go-native list. + if fieldDesc.IsList() { + reflectType = reflect.SliceOf(reflectType) + } + var keyType, valType *FieldDescription + if fieldDesc.IsMap() { + keyType = newFieldDescription(fieldDesc.MapKey()) + valType = newFieldDescription(fieldDesc.MapValue()) + } + return &FieldDescription{ + desc: fieldDesc, + KeyType: keyType, + ValueType: valType, + reflectType: reflectType, + zeroMsg: zeroValueOf(zeroMsg), + } +} + +// FieldDescription holds metadata related to fields declared within a type. +type FieldDescription struct { + // KeyType holds the key FieldDescription for map fields. + KeyType *FieldDescription + // ValueType holds the value FieldDescription for map fields. + ValueType *FieldDescription + + desc protoreflect.FieldDescriptor + reflectType reflect.Type + zeroMsg proto.Message +} + +// CheckedType returns the type-definition used at type-check time. +func (fd *FieldDescription) CheckedType() *exprpb.Type { + if fd.desc.IsMap() { + return &exprpb.Type{ + TypeKind: &exprpb.Type_MapType_{ + MapType: &exprpb.Type_MapType{ + KeyType: fd.KeyType.typeDefToType(), + ValueType: fd.ValueType.typeDefToType(), + }, + }, + } + } + if fd.desc.IsList() { + return &exprpb.Type{ + TypeKind: &exprpb.Type_ListType_{ + ListType: &exprpb.Type_ListType{ + ElemType: fd.typeDefToType()}}} + } + return fd.typeDefToType() +} + +// Descriptor returns the protoreflect.FieldDescriptor for this type. +func (fd *FieldDescription) Descriptor() protoreflect.FieldDescriptor { + return fd.desc +} + +// IsSet returns whether the field is set on the target value, per the proto presence conventions +// of proto2 or proto3 accordingly. +// +// This function implements the FieldType.IsSet function contract which can be used to operate on +// more than just protobuf field accesses; however, the target here must be a protobuf.Message. +func (fd *FieldDescription) IsSet(target any) bool { + switch v := target.(type) { + case proto.Message: + pbRef := v.ProtoReflect() + pbDesc := pbRef.Descriptor() + if pbDesc == fd.desc.ContainingMessage() { + // When the target protobuf shares the same message descriptor instance as the field + // descriptor, use the cached field descriptor value. + return pbRef.Has(fd.desc) + } + // Otherwise, fallback to a dynamic lookup of the field descriptor from the target + // instance as an attempt to use the cached field descriptor will result in a panic. 
+ return pbRef.Has(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))) + default: + return false + } +} + +// GetFrom returns the accessor method associated with the field on the proto generated struct. +// +// If the field is not set, the proto default value is returned instead. +// +// This function implements the FieldType.GetFrom function contract which can be used to operate +// on more than just protobuf field accesses; however, the target here must be a protobuf.Message. +func (fd *FieldDescription) GetFrom(target any) (any, error) { + v, ok := target.(proto.Message) + if !ok { + return nil, fmt.Errorf("unsupported field selection target: (%T)%v", target, target) + } + pbRef := v.ProtoReflect() + pbDesc := pbRef.Descriptor() + var fieldVal any + if pbDesc == fd.desc.ContainingMessage() { + // When the target protobuf shares the same message descriptor instance as the field + // descriptor, use the cached field descriptor value. + fieldVal = pbRef.Get(fd.desc).Interface() + } else { + // Otherwise, fallback to a dynamic lookup of the field descriptor from the target + // instance as an attempt to use the cached field descriptor will result in a panic. + fieldVal = pbRef.Get(pbDesc.Fields().ByName(protoreflect.Name(fd.Name()))).Interface() + } + switch fv := fieldVal.(type) { + // Fast-path return for primitive types. + case bool, []byte, float32, float64, int32, int64, string, uint32, uint64, protoreflect.List: + return fv, nil + case protoreflect.EnumNumber: + return int64(fv), nil + case protoreflect.Map: + // Return a wrapper around the protobuf-reflected Map types which carries additional + // information about the key and value definitions of the map. + return &Map{Map: fv, KeyType: fd.KeyType, ValueType: fd.ValueType}, nil + case protoreflect.Message: + // Make sure to unwrap well-known protobuf types before returning. + unwrapped, _, err := fd.MaybeUnwrapDynamic(fv) + return unwrapped, err + default: + return fv, nil + } +} + +// IsEnum returns true if the field type refers to an enum value. +func (fd *FieldDescription) IsEnum() bool { + return fd.ProtoKind() == protoreflect.EnumKind +} + +// IsMap returns true if the field is of map type. +func (fd *FieldDescription) IsMap() bool { + return fd.desc.IsMap() +} + +// IsMessage returns true if the field is of message type. +func (fd *FieldDescription) IsMessage() bool { + kind := fd.ProtoKind() + return kind == protoreflect.MessageKind || kind == protoreflect.GroupKind +} + +// IsOneof returns true if the field is declared within a oneof block. +func (fd *FieldDescription) IsOneof() bool { + return fd.desc.ContainingOneof() != nil +} + +// IsList returns true if the field is a repeated value. +// +// This method will also return true for map values, so check whether the +// field is also a map. +func (fd *FieldDescription) IsList() bool { + return fd.desc.IsList() +} + +// MaybeUnwrapDynamic takes the reflected protoreflect.Message and determines whether the +// value can be unwrapped to a more primitive CEL type. +// +// This function returns the unwrapped value and 'true' on success, or the original value +// and 'false' otherwise. +func (fd *FieldDescription) MaybeUnwrapDynamic(msg protoreflect.Message) (any, bool, error) { + return unwrapDynamic(fd, msg) +} + +// Name returns the CamelCase name of the field within the proto-based struct. +func (fd *FieldDescription) Name() string { + return string(fd.desc.Name()) +} + +// ProtoKind returns the protobuf reflected kind of the field. 
+func (fd *FieldDescription) ProtoKind() protoreflect.Kind { + return fd.desc.Kind() +} + +// ReflectType returns the Golang reflect.Type for this field. +func (fd *FieldDescription) ReflectType() reflect.Type { + return fd.reflectType +} + +// String returns the fully qualified name of the field within its type as well as whether the +// field occurs within a oneof. +func (fd *FieldDescription) String() string { + return fmt.Sprintf("%v.%s `oneof=%t`", fd.desc.ContainingMessage().FullName(), fd.Name(), fd.IsOneof()) +} + +// Zero returns the zero value for the protobuf message represented by this field. +// +// If the field is not a proto.Message type, the zero value is nil. +func (fd *FieldDescription) Zero() proto.Message { + return fd.zeroMsg +} + +func (fd *FieldDescription) typeDefToType() *exprpb.Type { + if fd.IsMessage() { + msgType := string(fd.desc.Message().FullName()) + if wk, found := CheckedWellKnowns[msgType]; found { + return wk + } + return checkedMessageType(msgType) + } + if fd.IsEnum() { + return checkedInt + } + return CheckedPrimitives[fd.ProtoKind()] +} + +// Map wraps the protoreflect.Map object with a key and value FieldDescription for use in +// retrieving individual elements within CEL value data types. +type Map struct { + protoreflect.Map + KeyType *FieldDescription + ValueType *FieldDescription +} + +func checkedMessageType(name string) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_MessageType{MessageType: name}} +} + +func checkedPrimitive(primitive exprpb.Type_PrimitiveType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_Primitive{Primitive: primitive}} +} + +func checkedWellKnown(wellKnown exprpb.Type_WellKnownType) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_WellKnown{WellKnown: wellKnown}} +} + +func checkedWrap(t *exprpb.Type) *exprpb.Type { + return &exprpb.Type{ + TypeKind: &exprpb.Type_Wrapper{Wrapper: t.GetPrimitive()}} +} + +// unwrap unwraps the provided proto.Message value, potentially based on the description if the +// input message is a *dynamicpb.Message which obscures the typing information from Go. +// +// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'. 
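+//
+// Editor's sketch of the contract (illustrative; a nil description is safe
+// here because desc is only consulted for dynamic and Any-packed messages):
+//
+//	v, ok, _ := unwrap(nil, wrapperspb.Int64(42))      // v == int64(42), ok == true
+//	v, ok, _ = unwrap(nil, &tpb.Timestamp{Seconds: 1}) // v is a time.Time, ok == true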
+func unwrap(desc description, msg proto.Message) (any, bool, error) {
+	switch v := msg.(type) {
+	case *anypb.Any:
+		dynMsg, err := v.UnmarshalNew()
+		if err != nil {
+			return v, false, err
+		}
+		return unwrapDynamic(desc, dynMsg.ProtoReflect())
+	case *dynamicpb.Message:
+		return unwrapDynamic(desc, v)
+	case *dpb.Duration:
+		return v.AsDuration(), true, nil
+	case *tpb.Timestamp:
+		return v.AsTime(), true, nil
+	case *structpb.Value:
+		switch v.GetKind().(type) {
+		case *structpb.Value_BoolValue:
+			return v.GetBoolValue(), true, nil
+		case *structpb.Value_ListValue:
+			return v.GetListValue(), true, nil
+		case *structpb.Value_NullValue:
+			return structpb.NullValue_NULL_VALUE, true, nil
+		case *structpb.Value_NumberValue:
+			return v.GetNumberValue(), true, nil
+		case *structpb.Value_StringValue:
+			return v.GetStringValue(), true, nil
+		case *structpb.Value_StructValue:
+			return v.GetStructValue(), true, nil
+		default:
+			return structpb.NullValue_NULL_VALUE, true, nil
+		}
+	case *wrapperspb.BoolValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.BytesValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.DoubleValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.FloatValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return float64(v.GetValue()), true, nil
+	case *wrapperspb.Int32Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return int64(v.GetValue()), true, nil
+	case *wrapperspb.Int64Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.StringValue:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	case *wrapperspb.UInt32Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return uint64(v.GetValue()), true, nil
+	case *wrapperspb.UInt64Value:
+		if v == nil {
+			return nil, true, nil
+		}
+		return v.GetValue(), true, nil
+	}
+	return msg, false, nil
+}
+
+// unwrapDynamic unwraps a reflected protobuf Message value.
+//
+// Returns the unwrapped value and 'true' if unwrapped, otherwise the input value and 'false'.
+func unwrapDynamic(desc description, refMsg protoreflect.Message) (any, bool, error) {
+	msg := refMsg.Interface()
+	if !refMsg.IsValid() {
+		msg = desc.Zero()
+	}
+	// In order to ensure that these wrapped types match the expectations of the CEL type system
+	// the dynamicpb.Message must be merged with a protobuf instance of the well-known type value.
+	typeName := string(refMsg.Descriptor().FullName())
+	switch typeName {
+	case "google.protobuf.Any":
+		// Note, Any values require further unwrapping; however, this unwrapping may or may not
+		// be to a well-known type. If the unwrapped value is a well-known type it will be further
+		// unwrapped before being returned to the caller. Otherwise, the dynamic protobuf object
+		// represented by the Any will be returned.
+		unwrappedAny := &anypb.Any{}
+		err := Merge(unwrappedAny, msg)
+		if err != nil {
+			return nil, false, fmt.Errorf("unwrap dynamic field failed: %v", err)
+		}
+		dynMsg, err := unwrappedAny.UnmarshalNew()
+		if err != nil {
+			// Allow the error to move further up the stack as it should result in a type
+			// conversion error if the caller does not recover it somehow.
+			return nil, false, fmt.Errorf("unmarshal dynamic any failed: %v", err)
+		}
+		// Attempt to unwrap the dynamic type, otherwise return the dynamic message.
+ unwrapped, nested, err := unwrapDynamic(desc, dynMsg.ProtoReflect()) + if err == nil && nested { + return unwrapped, true, nil + } + return dynMsg, true, err + case "google.protobuf.BoolValue", + "google.protobuf.BytesValue", + "google.protobuf.DoubleValue", + "google.protobuf.FloatValue", + "google.protobuf.Int32Value", + "google.protobuf.Int64Value", + "google.protobuf.StringValue", + "google.protobuf.UInt32Value", + "google.protobuf.UInt64Value": + // The msg value is ignored when dealing with wrapper types as they have a null or value + // behavior, rather than the standard zero value behavior of other proto message types. + if !refMsg.IsValid() { + return structpb.NullValue_NULL_VALUE, true, nil + } + valueField := refMsg.Descriptor().Fields().ByName("value") + return refMsg.Get(valueField).Interface(), true, nil + case "google.protobuf.Duration": + unwrapped := &dpb.Duration{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped.AsDuration(), true, nil + case "google.protobuf.ListValue": + unwrapped := &structpb.ListValue{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped, true, nil + case "google.protobuf.NullValue": + return structpb.NullValue_NULL_VALUE, true, nil + case "google.protobuf.Struct": + unwrapped := &structpb.Struct{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped, true, nil + case "google.protobuf.Timestamp": + unwrapped := &tpb.Timestamp{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrapped.AsTime(), true, nil + case "google.protobuf.Value": + unwrapped := &structpb.Value{} + err := Merge(unwrapped, msg) + if err != nil { + return nil, false, err + } + return unwrap(desc, unwrapped) + } + return msg, false, nil +} + +// reflectTypeOf intercepts the reflect.Type call to ensure that dynamicpb.Message types preserve +// well-known protobuf reflected types expected by the CEL type system. +func reflectTypeOf(val any) reflect.Type { + switch v := val.(type) { + case proto.Message: + return reflect.TypeOf(zeroValueOf(v)) + default: + return reflect.TypeOf(v) + } +} + +// zeroValueOf will return the strongest possible proto.Message representing the default protobuf +// message value of the input msg type. 
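+//
+// Editor's sketch (illustrative): only the message's full name matters, not
+// its field values:
+//
+//	z := zeroValueOf(&dpb.Duration{Seconds: 5}) // z is the shared zero &dpb.Duration{}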
+func zeroValueOf(msg proto.Message) proto.Message {
+	if msg == nil {
+		return nil
+	}
+	typeName := string(msg.ProtoReflect().Descriptor().FullName())
+	zeroVal, found := zeroValueMap[typeName]
+	if found {
+		return zeroVal
+	}
+	return msg
+}
+
+var (
+	jsonValueTypeURL = "types.googleapis.com/google.protobuf.Value"
+
+	zeroValueMap = map[string]proto.Message{
+		"google.protobuf.Any":         &anypb.Any{TypeUrl: jsonValueTypeURL},
+		"google.protobuf.Duration":    &dpb.Duration{},
+		"google.protobuf.ListValue":   &structpb.ListValue{},
+		"google.protobuf.Struct":      &structpb.Struct{},
+		"google.protobuf.Timestamp":   &tpb.Timestamp{},
+		"google.protobuf.Value":       &structpb.Value{},
+		"google.protobuf.BoolValue":   wrapperspb.Bool(false),
+		"google.protobuf.BytesValue":  wrapperspb.Bytes([]byte{}),
+		"google.protobuf.DoubleValue": wrapperspb.Double(0.0),
+		"google.protobuf.FloatValue":  wrapperspb.Float(0.0),
+		"google.protobuf.Int32Value":  wrapperspb.Int32(0),
+		"google.protobuf.Int64Value":  wrapperspb.Int64(0),
+		"google.protobuf.StringValue": wrapperspb.String(""),
+		"google.protobuf.UInt32Value": wrapperspb.UInt32(0),
+		"google.protobuf.UInt64Value": wrapperspb.UInt64(0),
+	}
+)
diff --git a/vendor/github.com/google/cel-go/common/types/provider.go b/vendor/github.com/google/cel-go/common/types/provider.go
new file mode 100644
index 00000000..936a4e28
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/provider.go
@@ -0,0 +1,766 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	"github.com/google/cel-go/common/types/pb"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	dpb "google.golang.org/protobuf/types/known/durationpb"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	tpb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Adapter converts native Go values of varying type and complexity to equivalent CEL values.
+type Adapter = ref.TypeAdapter
+
+// Provider specifies functions for creating new object instances and for resolving
+// enum values by name.
+type Provider interface {
+	// EnumValue returns the numeric value of the given enum value name.
+	EnumValue(enumName string) ref.Val
+
+	// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+	FindIdent(identName string) (ref.Val, bool)
+
+	// FindStructType returns the Type given a qualified type name.
+	//
+	// For historical reasons, only struct types are expected to be returned through this
+	// method, and the type values are expected to be wrapped in a TypeType instance using
+	// TypeTypeWithParam().
+	//
+	// Returns false if not found.
+	FindStructType(structType string) (*Type, bool)
+
+	// FindStructFieldNames returns the field names associated with the type, if the type
+	// is found.
+	FindStructFieldNames(structType string) ([]string, bool)
+
+	// FindStructFieldType returns the field type for a checked type value. Returns
+	// false if the field could not be found.
+	FindStructFieldType(structType, fieldName string) (*FieldType, bool)
+
+	// NewValue creates a new type value from a qualified name and map of field
+	// name to value.
+	//
+	// Note, for each value, the Val.ConvertToNative function will be invoked
+	// to convert the Val to the field's native type. If an error occurs during
+	// conversion, the NewValue will be a types.Err.
+	NewValue(structType string, fields map[string]ref.Val) ref.Val
+}
+
+// FieldType represents a field's type value and whether that field supports presence detection.
+type FieldType struct {
+	// Type of the field as a CEL native type value.
+	Type *Type
+
+	// IsSet indicates whether the field is set on an input object.
+	IsSet ref.FieldTester
+
+	// GetFrom retrieves the field value on the input object, if set.
+	GetFrom ref.FieldGetter
+}
+
+// Registry provides type information for a set of registered types.
+type Registry struct {
+	revTypeMap map[string]*Type
+	pbdb       *pb.Db
+}
+
+// NewRegistry accepts a list of proto message instances and returns a type
+// provider which can create new instances of the provided message or any
+// message that proto depends upon in its FileDescriptor.
+func NewRegistry(types ...proto.Message) (*Registry, error) {
+	p := &Registry{
+		revTypeMap: make(map[string]*Type),
+		pbdb:       pb.NewDb(),
+	}
+	err := p.RegisterType(
+		BoolType,
+		BytesType,
+		DoubleType,
+		DurationType,
+		IntType,
+		ListType,
+		MapType,
+		NullType,
+		StringType,
+		TimestampType,
+		TypeType,
+		UintType)
+	if err != nil {
+		return nil, err
+	}
+	// This block ensures that the well-known protobuf types are registered by default.
+	for _, fd := range p.pbdb.FileDescriptions() {
+		err = p.registerAllTypes(fd)
+		if err != nil {
+			return nil, err
+		}
+	}
+	for _, msgType := range types {
+		err = p.RegisterMessage(msgType)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return p, nil
+}
+
+// NewEmptyRegistry returns a registry which is completely unconfigured.
+func NewEmptyRegistry() *Registry {
+	return &Registry{
+		revTypeMap: make(map[string]*Type),
+		pbdb:       pb.NewDb(),
+	}
+}
+
+// Copy copies the current state of the registry into its own memory space.
+func (p *Registry) Copy() *Registry {
+	copy := &Registry{
+		revTypeMap: make(map[string]*Type),
+		pbdb:       p.pbdb.Copy(),
+	}
+	for k, v := range p.revTypeMap {
+		copy.revTypeMap[k] = v
+	}
+	return copy
+}
+
+// EnumValue returns the numeric value of the given enum value name.
+func (p *Registry) EnumValue(enumName string) ref.Val {
+	enumVal, found := p.pbdb.DescribeEnum(enumName)
+	if !found {
+		return NewErr("unknown enum name '%s'", enumName)
+	}
+	return Int(enumVal.Value())
+}
+
+// FindFieldType returns the field type for a checked type value. Returns false if
+// the field could not be found.
+//
+// Deprecated: use FindStructFieldType
+func (p *Registry) FindFieldType(structType, fieldName string) (*ref.FieldType, bool) {
+	msgType, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return nil, false
+	}
+	field, found := msgType.FieldByName(fieldName)
+	if !found {
+		return nil, false
+	}
+	return &ref.FieldType{
+		Type:    field.CheckedType(),
+		IsSet:   field.IsSet,
+		GetFrom: field.GetFrom}, true
+}
+
+// FindStructFieldNames returns the set of field names for the given struct type,
+// if the type exists in the registry.
+func (p *Registry) FindStructFieldNames(structType string) ([]string, bool) {
+	msgType, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return []string{}, false
+	}
+	fieldMap := msgType.FieldMap()
+	fields := make([]string, len(fieldMap))
+	idx := 0
+	for f := range fieldMap {
+		fields[idx] = f
+		idx++
+	}
+	return fields, true
+}
+
+// FindStructFieldType returns the field type for a checked type value. Returns
+// false if the field could not be found.
+func (p *Registry) FindStructFieldType(structType, fieldName string) (*FieldType, bool) {
+	msgType, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return nil, false
+	}
+	field, found := msgType.FieldByName(fieldName)
+	if !found {
+		return nil, false
+	}
+	return &FieldType{
+		Type:    fieldDescToCELType(field),
+		IsSet:   field.IsSet,
+		GetFrom: field.GetFrom}, true
+}
+
+// FindIdent takes a qualified identifier name and returns a ref.Val if one exists.
+func (p *Registry) FindIdent(identName string) (ref.Val, bool) {
+	if t, found := p.revTypeMap[identName]; found {
+		return t, true
+	}
+	if enumVal, found := p.pbdb.DescribeEnum(identName); found {
+		return Int(enumVal.Value()), true
+	}
+	return nil, false
+}
+
+// FindType looks up the Type given a qualified typeName. Returns false if not found.
+//
+// Deprecated: use FindStructType
+func (p *Registry) FindType(structType string) (*exprpb.Type, bool) {
+	if _, found := p.pbdb.DescribeType(structType); !found {
+		return nil, false
+	}
+	if structType != "" && structType[0] == '.' {
+		structType = structType[1:]
+	}
+	return &exprpb.Type{
+		TypeKind: &exprpb.Type_Type{
+			Type: &exprpb.Type{
+				TypeKind: &exprpb.Type_MessageType{
+					MessageType: structType}}}}, true
+}
+
+// FindStructType returns the Type given a qualified type name.
+//
+// For historical reasons, only struct types are expected to be returned through this
+// method, and the type values are expected to be wrapped in a TypeType instance using
+// TypeTypeWithParam().
+//
+// Returns false if not found.
+func (p *Registry) FindStructType(structType string) (*Type, bool) {
+	if _, found := p.pbdb.DescribeType(structType); !found {
+		return nil, false
+	}
+	if structType != "" && structType[0] == '.' {
+		structType = structType[1:]
+	}
+	return NewTypeTypeWithParam(NewObjectType(structType)), true
+}
+
+// NewValue creates a new type value from a qualified name and map of field
+// name to value.
+//
+// Note, for each value, the Val.ConvertToNative function will be invoked
+// to convert the Val to the field's native type. If an error occurs during
+// conversion, the NewValue will be a types.Err.
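+//
+// Editor's sketch (illustrative; the type must already be registered, e.g.
+// via NewRegistry or RegisterMessage):
+//
+//	reg, _ := NewRegistry(&tpb.Timestamp{})
+//	v := reg.NewValue("google.protobuf.Timestamp", map[string]ref.Val{
+//		"seconds": Int(7),
+//	})
+//	_ = v // a CEL timestamp on success, a types.Err on failure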
+func (p *Registry) NewValue(structType string, fields map[string]ref.Val) ref.Val {
+	td, found := p.pbdb.DescribeType(structType)
+	if !found {
+		return NewErr("unknown type '%s'", structType)
+	}
+	msg := td.New()
+	fieldMap := td.FieldMap()
+	for name, value := range fields {
+		field, found := fieldMap[name]
+		if !found {
+			return NewErr("no such field: %s", name)
+		}
+		err := msgSetField(msg, field, value)
+		if err != nil {
+			return &Err{error: err}
+		}
+	}
+	return p.NativeToValue(msg.Interface())
+}
+
+// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+func (p *Registry) RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error {
+	fd, err := p.pbdb.RegisterDescriptor(fileDesc)
+	if err != nil {
+		return err
+	}
+	return p.registerAllTypes(fd)
+}
+
+// RegisterMessage registers a protocol buffer message and its dependencies.
+func (p *Registry) RegisterMessage(message proto.Message) error {
+	fd, err := p.pbdb.RegisterMessage(message)
+	if err != nil {
+		return err
+	}
+	return p.registerAllTypes(fd)
+}
+
+// RegisterType registers a type value with the provider which ensures the provider is aware of how to
+// map the type to an identifier.
+//
+// If the `ref.Type` value is a `*types.Type` it will be registered directly by its runtime type name.
+// If the `ref.Type` value is not a `*types.Type` instance, a `*types.Type` instance is generated which
+// reflects the traits present on the input and the runtime type name. By default this foreign type will
+// be treated as a types.StructKind. To avoid potential issues where the `ref.Type` value does not match
+// the generated `*types.Type` instance, consider always using the `*types.Type` to represent type
+// extensions to CEL, even when they're not based on protobuf types.
+func (p *Registry) RegisterType(types ...ref.Type) error {
+	for _, t := range types {
+		celType := maybeForeignType(t)
+		existing, found := p.revTypeMap[t.TypeName()]
+		if !found {
+			p.revTypeMap[t.TypeName()] = celType
+			continue
+		}
+		if !existing.IsEquivalentType(celType) {
+			return fmt.Errorf("type registration conflict. found: %v, input: %v", existing, celType)
+		}
+		if existing.traitMask != celType.traitMask {
+			return fmt.Errorf(
+				"type registered with conflicting traits: %v with traits %v, input: %v",
+				existing.TypeName(), existing.traitMask, celType.traitMask)
+		}
+	}
+	return nil
+}
+
+// NativeToValue converts various "native" types to ref.Val with this specific implementation
+// providing support for custom proto-based types.
+//
+// This method should be the inverse of ref.Val.ConvertToNative.
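+//
+// Editor's sketch (illustrative):
+//
+//	reg, _ := NewRegistry()
+//	_ = reg.NativeToValue("hello")     // a CEL string
+//	_ = reg.NativeToValue(time.Minute) // a CEL duration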
+func (p *Registry) NativeToValue(value any) ref.Val { + if val, found := nativeToValue(p, value); found { + return val + } + switch v := value.(type) { + case proto.Message: + typeName := string(v.ProtoReflect().Descriptor().FullName()) + td, found := p.pbdb.DescribeType(typeName) + if !found { + return NewErr("unknown type: '%s'", typeName) + } + unwrapped, isUnwrapped, err := td.MaybeUnwrap(v) + if err != nil { + return UnsupportedRefValConversionErr(v) + } + if isUnwrapped { + return p.NativeToValue(unwrapped) + } + typeVal, found := p.FindIdent(typeName) + if !found { + return NewErr("unknown type: '%s'", typeName) + } + return NewObject(p, td, typeVal, v) + case *pb.Map: + return NewProtoMap(p, v) + case protoreflect.List: + return NewProtoList(p, v) + case protoreflect.Message: + return p.NativeToValue(v.Interface()) + case protoreflect.Value: + return p.NativeToValue(v.Interface()) + } + return UnsupportedRefValConversionErr(value) +} + +func (p *Registry) registerAllTypes(fd *pb.FileDescription) error { + for _, typeName := range fd.GetTypeNames() { + // skip well-known type names since they're automatically sanitized + // during NewObjectType() calls. + if _, found := checkedWellKnowns[typeName]; found { + continue + } + err := p.RegisterType(NewObjectTypeValue(typeName)) + if err != nil { + return err + } + } + return nil +} + +func fieldDescToCELType(field *pb.FieldDescription) *Type { + if field.IsMap() { + return NewMapType( + singularFieldDescToCELType(field.KeyType), + singularFieldDescToCELType(field.ValueType)) + } + if field.IsList() { + return NewListType(singularFieldDescToCELType(field)) + } + return singularFieldDescToCELType(field) +} + +func singularFieldDescToCELType(field *pb.FieldDescription) *Type { + if field.IsMessage() { + return NewObjectType(string(field.Descriptor().Message().FullName())) + } + if field.IsEnum() { + return IntType + } + return ProtoCELPrimitives[field.ProtoKind()] +} + +// defaultTypeAdapter converts go native types to CEL values. +type defaultTypeAdapter struct{} + +var ( + // DefaultTypeAdapter adapts canonical CEL types from their equivalent Go values. + DefaultTypeAdapter = &defaultTypeAdapter{} +) + +// NativeToValue implements the ref.TypeAdapter interface. 
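+//
+// Editor's sketch (illustrative): the default adapter covers native Go
+// scalars, lists, and maps; proto-backed custom types require a Registry:
+//
+//	_ = DefaultTypeAdapter.NativeToValue(42)                 // a CEL int
+//	_ = DefaultTypeAdapter.NativeToValue([]string{"a", "b"}) // a CEL list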
+func (a *defaultTypeAdapter) NativeToValue(value any) ref.Val {
+	if val, found := nativeToValue(a, value); found {
+		return val
+	}
+	return UnsupportedRefValConversionErr(value)
+}
+
+// nativeToValue returns the converted (ref.Val, true) if a conversion is found,
+// otherwise (nil, false)
+func nativeToValue(a Adapter, value any) (ref.Val, bool) {
+	switch v := value.(type) {
+	case nil:
+		return NullValue, true
+	case *Bool:
+		if v != nil {
+			return *v, true
+		}
+	case *Bytes:
+		if v != nil {
+			return *v, true
+		}
+	case *Double:
+		if v != nil {
+			return *v, true
+		}
+	case *Int:
+		if v != nil {
+			return *v, true
+		}
+	case *String:
+		if v != nil {
+			return *v, true
+		}
+	case *Uint:
+		if v != nil {
+			return *v, true
+		}
+	case bool:
+		return Bool(v), true
+	case int:
+		return Int(v), true
+	case int32:
+		return Int(v), true
+	case int64:
+		return Int(v), true
+	case uint:
+		return Uint(v), true
+	case uint32:
+		return Uint(v), true
+	case uint64:
+		return Uint(v), true
+	case float32:
+		return Double(v), true
+	case float64:
+		return Double(v), true
+	case string:
+		return String(v), true
+	case *dpb.Duration:
+		return Duration{Duration: v.AsDuration()}, true
+	case time.Duration:
+		return Duration{Duration: v}, true
+	case *tpb.Timestamp:
+		return Timestamp{Time: v.AsTime()}, true
+	case time.Time:
+		return Timestamp{Time: v}, true
+	case *bool:
+		if v != nil {
+			return Bool(*v), true
+		}
+	case *float32:
+		if v != nil {
+			return Double(*v), true
+		}
+	case *float64:
+		if v != nil {
+			return Double(*v), true
+		}
+	case *int:
+		if v != nil {
+			return Int(*v), true
+		}
+	case *int32:
+		if v != nil {
+			return Int(*v), true
+		}
+	case *int64:
+		if v != nil {
+			return Int(*v), true
+		}
+	case *string:
+		if v != nil {
+			return String(*v), true
+		}
+	case *uint:
+		if v != nil {
+			return Uint(*v), true
+		}
+	case *uint32:
+		if v != nil {
+			return Uint(*v), true
+		}
+	case *uint64:
+		if v != nil {
+			return Uint(*v), true
+		}
+	case []byte:
+		return Bytes(v), true
+	// specializations for common list types.
+	case []string:
+		return NewStringList(a, v), true
+	case []ref.Val:
+		return NewRefValList(a, v), true
+	// specializations for common map types.
+	case map[string]string:
+		return NewStringStringMap(a, v), true
+	case map[string]any:
+		return NewStringInterfaceMap(a, v), true
+	case map[ref.Val]ref.Val:
+		return NewRefValMap(a, v), true
+	// additional specializations may be added upon request / need.
+ case *anypb.Any: + if v == nil { + return UnsupportedRefValConversionErr(v), true + } + unpackedAny, err := v.UnmarshalNew() + if err != nil { + return NewErr("anypb.UnmarshalNew() failed for type %q: %v", v.GetTypeUrl(), err), true + } + return a.NativeToValue(unpackedAny), true + case *structpb.NullValue, structpb.NullValue: + return NullValue, true + case *structpb.ListValue: + return NewJSONList(a, v), true + case *structpb.Struct: + return NewJSONStruct(a, v), true + case ref.Val: + return v, true + case protoreflect.EnumNumber: + return Int(v), true + case proto.Message: + if v == nil { + return UnsupportedRefValConversionErr(v), true + } + typeName := string(v.ProtoReflect().Descriptor().FullName()) + td, found := pb.DefaultDb.DescribeType(typeName) + if !found { + return nil, false + } + val, unwrapped, err := td.MaybeUnwrap(v) + if err != nil { + return UnsupportedRefValConversionErr(v), true + } + if !unwrapped { + return nil, false + } + return a.NativeToValue(val), true + // Note: dynamicpb.Message implements the proto.Message _and_ protoreflect.Message interfaces + // which means that this case must appear after handling a proto.Message type. + case protoreflect.Message: + return a.NativeToValue(v.Interface()), true + default: + refValue := reflect.ValueOf(v) + if refValue.Kind() == reflect.Ptr { + if refValue.IsNil() { + return UnsupportedRefValConversionErr(v), true + } + refValue = refValue.Elem() + } + refKind := refValue.Kind() + switch refKind { + case reflect.Array, reflect.Slice: + if refValue.Type().Elem() == reflect.TypeOf(byte(0)) { + if refValue.CanAddr() { + return Bytes(refValue.Bytes()), true + } + tmp := reflect.New(refValue.Type()) + tmp.Elem().Set(refValue) + return Bytes(tmp.Elem().Bytes()), true + } + return NewDynamicList(a, v), true + case reflect.Map: + return NewDynamicMap(a, v), true + // type aliases of primitive types cannot be asserted as that type, but rather need + // to be downcast to int32 before being converted to a CEL representation. 
+	case reflect.Bool:
+		boolType := reflect.TypeOf(false)
+		return Bool(refValue.Convert(boolType).Interface().(bool)), true
+	case reflect.Int:
+		intType := reflect.TypeOf(int(0))
+		return Int(refValue.Convert(intType).Interface().(int)), true
+	case reflect.Int8:
+		intType := reflect.TypeOf(int8(0))
+		return Int(refValue.Convert(intType).Interface().(int8)), true
+	case reflect.Int16:
+		intType := reflect.TypeOf(int16(0))
+		return Int(refValue.Convert(intType).Interface().(int16)), true
+	case reflect.Int32:
+		intType := reflect.TypeOf(int32(0))
+		return Int(refValue.Convert(intType).Interface().(int32)), true
+	case reflect.Int64:
+		intType := reflect.TypeOf(int64(0))
+		return Int(refValue.Convert(intType).Interface().(int64)), true
+	case reflect.Uint:
+		uintType := reflect.TypeOf(uint(0))
+		return Uint(refValue.Convert(uintType).Interface().(uint)), true
+	case reflect.Uint8:
+		uintType := reflect.TypeOf(uint8(0))
+		return Uint(refValue.Convert(uintType).Interface().(uint8)), true
+	case reflect.Uint16:
+		uintType := reflect.TypeOf(uint16(0))
+		return Uint(refValue.Convert(uintType).Interface().(uint16)), true
+	case reflect.Uint32:
+		uintType := reflect.TypeOf(uint32(0))
+		return Uint(refValue.Convert(uintType).Interface().(uint32)), true
+	case reflect.Uint64:
+		uintType := reflect.TypeOf(uint64(0))
+		return Uint(refValue.Convert(uintType).Interface().(uint64)), true
+	case reflect.Float32:
+		doubleType := reflect.TypeOf(float32(0))
+		return Double(refValue.Convert(doubleType).Interface().(float32)), true
+	case reflect.Float64:
+		doubleType := reflect.TypeOf(float64(0))
+		return Double(refValue.Convert(doubleType).Interface().(float64)), true
+	case reflect.String:
+		stringType := reflect.TypeOf("")
+		return String(refValue.Convert(stringType).Interface().(string)), true
+	}
+	}
+	return nil, false
+}
+
+func msgSetField(target protoreflect.Message, field *pb.FieldDescription, val ref.Val) error {
+	if field.IsList() {
+		lv := target.NewField(field.Descriptor())
+		list, ok := val.(traits.Lister)
+		if !ok {
+			return unsupportedTypeConversionError(field, val)
+		}
+		err := msgSetListField(lv.List(), field, list)
+		if err != nil {
+			return err
+		}
+		target.Set(field.Descriptor(), lv)
+		return nil
+	}
+	if field.IsMap() {
+		mv := target.NewField(field.Descriptor())
+		mp, ok := val.(traits.Mapper)
+		if !ok {
+			return unsupportedTypeConversionError(field, val)
+		}
+		err := msgSetMapField(mv.Map(), field, mp)
+		if err != nil {
+			return err
+		}
+		target.Set(field.Descriptor(), mv)
+		return nil
+	}
+	v, err := val.ConvertToNative(field.ReflectType())
+	if err != nil {
+		return fieldTypeConversionError(field, err)
+	}
+	if v == nil {
+		return nil
+	}
+	switch pv := v.(type) {
+	case proto.Message:
+		v = pv.ProtoReflect()
+	}
+	target.Set(field.Descriptor(), protoreflect.ValueOf(v))
+	return nil
+}
+
+func msgSetListField(target protoreflect.List, listField *pb.FieldDescription, listVal traits.Lister) error {
+	elemReflectType := listField.ReflectType().Elem()
+	for i := Int(0); i < listVal.Size().(Int); i++ {
+		elem := listVal.Get(i)
+		elemVal, err := elem.ConvertToNative(elemReflectType)
+		if err != nil {
+			return fieldTypeConversionError(listField, err)
+		}
+		if elemVal == nil {
+			continue
+		}
+		switch ev := elemVal.(type) {
+		case proto.Message:
+			elemVal = ev.ProtoReflect()
+		}
+		target.Append(protoreflect.ValueOf(elemVal))
+	}
+	return nil
+}
+
+func msgSetMapField(target protoreflect.Map, mapField *pb.FieldDescription, mapVal traits.Mapper) error {
+	targetKeyType :=
mapField.KeyType.ReflectType() + targetValType := mapField.ValueType.ReflectType() + it := mapVal.Iterator() + for it.HasNext() == True { + key := it.Next() + val := mapVal.Get(key) + k, err := key.ConvertToNative(targetKeyType) + if err != nil { + return fieldTypeConversionError(mapField, err) + } + v, err := val.ConvertToNative(targetValType) + if err != nil { + return fieldTypeConversionError(mapField, err) + } + if v == nil { + continue + } + switch pv := v.(type) { + case proto.Message: + v = pv.ProtoReflect() + } + target.Set(protoreflect.ValueOf(k).MapKey(), protoreflect.ValueOf(v)) + } + return nil +} + +func unsupportedTypeConversionError(field *pb.FieldDescription, val ref.Val) error { + msgName := field.Descriptor().ContainingMessage().FullName() + return fmt.Errorf("unsupported field type for %v.%v: %v", msgName, field.Name(), val.Type()) +} + +func fieldTypeConversionError(field *pb.FieldDescription, err error) error { + msgName := field.Descriptor().ContainingMessage().FullName() + return fmt.Errorf("field type conversion error for %v.%v value type: %v", msgName, field.Name(), err) +} + +var ( + // ProtoCELPrimitives provides a map from the protoreflect Kind to the equivalent CEL type. + ProtoCELPrimitives = map[protoreflect.Kind]*Type{ + protoreflect.BoolKind: BoolType, + protoreflect.BytesKind: BytesType, + protoreflect.DoubleKind: DoubleType, + protoreflect.FloatKind: DoubleType, + protoreflect.Int32Kind: IntType, + protoreflect.Int64Kind: IntType, + protoreflect.Sint32Kind: IntType, + protoreflect.Sint64Kind: IntType, + protoreflect.Uint32Kind: UintType, + protoreflect.Uint64Kind: UintType, + protoreflect.Fixed32Kind: UintType, + protoreflect.Fixed64Kind: UintType, + protoreflect.Sfixed32Kind: IntType, + protoreflect.Sfixed64Kind: IntType, + protoreflect.StringKind: StringType, + } +) diff --git a/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel new file mode 100644 index 00000000..79330c33 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/ref/BUILD.bazel @@ -0,0 +1,20 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "provider.go", + "reference.go", + ], + importpath = "github.com/google/cel-go/common/types/ref", + deps = [ + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//reflect/protoreflect:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/types/ref/provider.go b/vendor/github.com/google/cel-go/common/types/ref/provider.go new file mode 100644 index 00000000..b9820023 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/ref/provider.go @@ -0,0 +1,102 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package ref
+
+import (
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// TypeProvider specifies functions for creating new object instances and for
+// resolving enum values by name.
+//
+// Deprecated: use types.Provider
+type TypeProvider interface {
+	// EnumValue returns the numeric value of the given enum value name.
+	EnumValue(enumName string) Val
+
+	// FindIdent takes a qualified identifier name and returns a Value if one exists.
+	FindIdent(identName string) (Val, bool)
+
+	// FindType looks up the Type given a qualified typeName. Returns false if not found.
+	FindType(typeName string) (*exprpb.Type, bool)
+
+	// FindFieldType returns the field type for a checked type value. Returns false if
+	// the field could not be found.
+	FindFieldType(messageType, fieldName string) (*FieldType, bool)
+
+	// NewValue creates a new type value from a qualified name and map of field name
+	// to value.
+	//
+	// Note, for each value, the Val.ConvertToNative function will be invoked to convert
+	// the Val to the field's native type. If an error occurs during conversion, the
+	// NewValue will be a types.Err.
+	NewValue(typeName string, fields map[string]Val) Val
+}
+
+// TypeAdapter converts native Go values of varying type and complexity to equivalent CEL values.
+//
+// Deprecated: use types.Adapter
+type TypeAdapter interface {
+	// NativeToValue converts the input `value` to a CEL `ref.Val`.
+	NativeToValue(value any) Val
+}
+
+// TypeRegistry allows third-parties to add custom types to CEL. Not all `TypeProvider`
+// implementations support type-customization, so these features are optional. However, a
+// `TypeRegistry` should be a `TypeProvider` and a `TypeAdapter` to ensure that types
+// which are registered can be converted to CEL representations.
+//
+// Deprecated: use types.Registry
+type TypeRegistry interface {
+	TypeAdapter
+	TypeProvider
+
+	// RegisterDescriptor registers the contents of a protocol buffer `FileDescriptor`.
+	RegisterDescriptor(fileDesc protoreflect.FileDescriptor) error
+
+	// RegisterMessage registers a protocol buffer message and its dependencies.
+	RegisterMessage(message proto.Message) error
+
+	// RegisterType registers a type value with the provider which ensures the
+	// provider is aware of how to map the type to an identifier.
+	//
+	// If a type is provided more than once with an alternative definition, the
+	// call will result in an error.
+	RegisterType(types ...Type) error
+}
+
+// FieldType represents a field's type value and whether that field supports
+// presence detection.
+//
+// Deprecated: use types.FieldType
+type FieldType struct {
+	// Type of the field as a protobuf type value.
+	Type *exprpb.Type
+
+	// IsSet indicates whether the field is set on an input object.
+	IsSet FieldTester
+
+	// GetFrom retrieves the field value on the input object, if set.
+	GetFrom FieldGetter
+}
+
+// FieldTester is used to test field presence on an input object.
+type FieldTester func(target any) bool
+
+// FieldGetter is used to get the field value from an input object, if set.
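+//
+// Editor's sketch (illustrative, not part of upstream cel-go): IsSet/GetFrom
+// accessors are typically closures over a field descriptor; a trivial,
+// hypothetical tester:
+//
+//	var isSet FieldTester = func(target any) bool {
+//		_, ok := target.(proto.Message)
+//		return ok
+//	}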
+type FieldGetter func(target any) (any, error)
diff --git a/vendor/github.com/google/cel-go/common/types/ref/reference.go b/vendor/github.com/google/cel-go/common/types/ref/reference.go
new file mode 100644
index 00000000..e0d58145
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/ref/reference.go
@@ -0,0 +1,63 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ref contains the reference interfaces used throughout the types components.
+package ref
+
+import (
+	"reflect"
+)
+
+// Type interface indicates the name of a given type.
+type Type interface {
+	// HasTrait returns whether the type has a given trait associated with it.
+	//
+	// See common/types/traits/traits.go for a list of supported traits.
+	HasTrait(trait int) bool
+
+	// TypeName returns the qualified type name of the type.
+	//
+	// The type name is also used as the type's identifier name at type-check and interpretation time.
+	TypeName() string
+}
+
+// Val interface defines the functions supported by all expression values.
+// Val implementations may specialize the behavior of the value through the addition of traits.
+type Val interface {
+	// ConvertToNative converts the Value to a native Go struct according to the
+	// reflected type description, or error if the conversion is not feasible.
+	//
+	// The ConvertToNative method is intended to be used to support conversion between CEL types
+	// and native types during object creation expressions or by clients who need to adapt the
+	// returned CEL value into an equivalent Go value instance.
+	//
+	// When implementing or using ConvertToNative, the following guidelines apply:
+	// - Use ConvertToNative when marshalling CEL evaluation results to native types.
+	// - Do not use ConvertToNative within CEL extension functions.
+	// - Document whether your implementation supports non-CEL field types, such as Go or Protobuf.
+	ConvertToNative(typeDesc reflect.Type) (any, error)
+
+	// ConvertToType supports type conversions between CEL value types supported by the expression language.
+	ConvertToType(typeValue Type) Val
+
+	// Equal returns true if the `other` value has the same type and content as the implementing struct.
+	Equal(other Val) Val
+
+	// Type returns the TypeValue of the value.
+	Type() Type
+
+	// Value returns the raw value of the instance which may not be directly compatible with the expression
+	// language types.
+	Value() any
+}
diff --git a/vendor/github.com/google/cel-go/common/types/string.go b/vendor/github.com/google/cel-go/common/types/string.go
new file mode 100644
index 00000000..3a93743f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/string.go
@@ -0,0 +1,226 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// String type implementation which supports addition, comparison, matching, +// and size functions. +type String string + +var ( + stringOneArgOverloads = map[string]func(ref.Val, ref.Val) ref.Val{ + overloads.Contains: StringContains, + overloads.EndsWith: StringEndsWith, + overloads.StartsWith: StringStartsWith, + } + + stringWrapperType = reflect.TypeOf(&wrapperspb.StringValue{}) +) + +// Add implements traits.Adder.Add. +func (s String) Add(other ref.Val) ref.Val { + otherString, ok := other.(String) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return s + otherString +} + +// Compare implements traits.Comparer.Compare. +func (s String) Compare(other ref.Val) ref.Val { + otherString, ok := other.(String) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + return Int(strings.Compare(s.Value().(string), otherString.Value().(string))) +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (s String) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.String: + return reflect.ValueOf(s).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.String(string(s))) + case jsonValueType: + // Convert to a protobuf representation of a JSON String. + return structpb.NewStringValue(string(s)), nil + case stringWrapperType: + // Convert to a wrapperspb.StringValue. + return wrapperspb.String(string(s)), nil + } + if typeDesc.Elem().Kind() == reflect.String { + p := s.Value().(string) + return &p, nil + } + case reflect.Interface: + sv := s.Value() + if reflect.TypeOf(sv).Implements(typeDesc) { + return sv, nil + } + if reflect.TypeOf(s).Implements(typeDesc) { + return s, nil + } + } + return nil, fmt.Errorf( + "unsupported native conversion from string to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
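+//
+// For illustration, String("42").ConvertToType(IntType) yields Int(42), while an
+// unparsable input such as String("forty-two") falls through to the conversion
+// error constructed below.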
+func (s String) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + if n, err := strconv.ParseInt(s.Value().(string), 10, 64); err == nil { + return Int(n) + } + case UintType: + if n, err := strconv.ParseUint(s.Value().(string), 10, 64); err == nil { + return Uint(n) + } + case DoubleType: + if n, err := strconv.ParseFloat(s.Value().(string), 64); err == nil { + return Double(n) + } + case BoolType: + if b, err := strconv.ParseBool(s.Value().(string)); err == nil { + return Bool(b) + } + case BytesType: + return Bytes(s) + case DurationType: + if d, err := time.ParseDuration(s.Value().(string)); err == nil { + return durationOf(d) + } + case TimestampType: + if t, err := time.Parse(time.RFC3339, s.Value().(string)); err == nil { + if t.Unix() < minUnixTime || t.Unix() > maxUnixTime { + return celErrTimestampOverflow + } + return timestampOf(t) + } + case StringType: + return s + case TypeType: + return StringType + } + return NewErr("type conversion error from '%s' to '%s'", StringType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (s String) Equal(other ref.Val) ref.Val { + otherString, ok := other.(String) + return Bool(ok && s == otherString) +} + +// IsZeroValue returns true if the string is empty. +func (s String) IsZeroValue() bool { + return len(s) == 0 +} + +// Match implements traits.Matcher.Match. +func (s String) Match(pattern ref.Val) ref.Val { + pat, ok := pattern.(String) + if !ok { + return MaybeNoSuchOverloadErr(pattern) + } + matched, err := regexp.MatchString(pat.Value().(string), s.Value().(string)) + if err != nil { + return &Err{error: err} + } + return Bool(matched) +} + +// Receive implements traits.Receiver.Receive. +func (s String) Receive(function string, overload string, args []ref.Val) ref.Val { + switch len(args) { + case 1: + if f, found := stringOneArgOverloads[function]; found { + return f(s, args[0]) + } + } + return NoSuchOverloadErr() +} + +// Size implements traits.Sizer.Size. +func (s String) Size() ref.Val { + return Int(len([]rune(s.Value().(string)))) +} + +// Type implements ref.Val.Type. +func (s String) Type() ref.Type { + return StringType +} + +// Value implements ref.Val.Value. +func (s String) Value() any { + return string(s) +} + +// StringContains returns whether the string contains a substring. +func StringContains(s, sub ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } + subStr, ok := sub.(String) + if !ok { + return MaybeNoSuchOverloadErr(sub) + } + return Bool(strings.Contains(string(str), string(subStr))) +} + +// StringEndsWith returns whether the target string contains the input suffix. +func StringEndsWith(s, suf ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } + sufStr, ok := suf.(String) + if !ok { + return MaybeNoSuchOverloadErr(suf) + } + return Bool(strings.HasSuffix(string(str), string(sufStr))) +} + +// StringStartsWith returns whether the target string contains the input prefix. 
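+//
+// For example, StringStartsWith(String("hello"), String("he")) evaluates to Bool(true).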
+func StringStartsWith(s, pre ref.Val) ref.Val { + str, ok := s.(String) + if !ok { + return MaybeNoSuchOverloadErr(s) + } + preStr, ok := pre.(String) + if !ok { + return MaybeNoSuchOverloadErr(pre) + } + return Bool(strings.HasPrefix(string(str), string(preStr))) +} diff --git a/vendor/github.com/google/cel-go/common/types/timestamp.go b/vendor/github.com/google/cel-go/common/types/timestamp.go new file mode 100644 index 00000000..33acdea8 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/timestamp.go @@ -0,0 +1,311 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + tpb "google.golang.org/protobuf/types/known/timestamppb" +) + +// Timestamp type implementation which supports add, compare, and subtract +// operations. Timestamps are also capable of participating in dynamic +// function dispatch to instance methods. +type Timestamp struct { + time.Time +} + +func timestampOf(t time.Time) Timestamp { + // Note that this function does not validate that time.Time is in our supported range. + return Timestamp{Time: t} +} + +const ( + // The number of seconds between year 1 and year 1970. This is borrowed from + // https://golang.org/src/time/time.go. + unixToInternal int64 = (1969*365 + 1969/4 - 1969/100 + 1969/400) * (60 * 60 * 24) + + // Number of seconds between `0001-01-01T00:00:00Z` and the Unix epoch. + minUnixTime int64 = -62135596800 + // Number of seconds between `9999-12-31T23:59:59.999999999Z` and the Unix epoch. + maxUnixTime int64 = 253402300799 +) + +// Add implements traits.Adder.Add. +func (t Timestamp) Add(other ref.Val) ref.Val { + switch other.Type() { + case DurationType: + return other.(Duration).Add(t) + } + return MaybeNoSuchOverloadErr(other) +} + +// Compare implements traits.Comparer.Compare. +func (t Timestamp) Compare(other ref.Val) ref.Val { + if TimestampType != other.Type() { + return MaybeNoSuchOverloadErr(other) + } + ts1 := t.Time + ts2 := other.(Timestamp).Time + switch { + case ts1.Before(ts2): + return IntNegOne + case ts1.After(ts2): + return IntOne + default: + return IntZero + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (t Timestamp) ConvertToNative(typeDesc reflect.Type) (any, error) { + // If the timestamp is already assignable to the desired type return it. + if reflect.TypeOf(t.Time).AssignableTo(typeDesc) { + return t.Time, nil + } + if reflect.TypeOf(t).AssignableTo(typeDesc) { + return t, nil + } + switch typeDesc { + case anyValueType: + // Pack the underlying time as a tpb.Timestamp into an Any value. + return anypb.New(tpb.New(t.Time)) + case jsonValueType: + // CEL follows the proto3 to JSON conversion which formats as an RFC 3339 encoded JSON + // string. 
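+		// For example, the Unix epoch renders as the JSON string "1970-01-01T00:00:00Z".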
+		v := t.ConvertToType(StringType)
+		if IsError(v) {
+			return nil, v.(*Err)
+		}
+		return structpb.NewStringValue(string(v.(String))), nil
+	case timestampValueType:
+		// Unwrap the underlying tpb.Timestamp.
+		return tpb.New(t.Time), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'Timestamp' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (t Timestamp) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case StringType:
+		return String(t.Format(time.RFC3339Nano))
+	case IntType:
+		// Return the Unix time in seconds since 1970.
+		return Int(t.Unix())
+	case TimestampType:
+		return t
+	case TypeType:
+		return TimestampType
+	}
+	return NewErr("type conversion error from '%s' to '%s'", TimestampType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (t Timestamp) Equal(other ref.Val) ref.Val {
+	otherTime, ok := other.(Timestamp)
+	return Bool(ok && t.Time.Equal(otherTime.Time))
+}
+
+// IsZeroValue returns true if the timestamp is the zero Go time.Time instant
+// (January 1, year 1), which is distinct from the Unix epoch.
+func (t Timestamp) IsZeroValue() bool {
+	return t.IsZero()
+}
+
+// Receive implements traits.Receiver.Receive.
+func (t Timestamp) Receive(function string, overload string, args []ref.Val) ref.Val {
+	switch len(args) {
+	case 0:
+		if f, found := timestampZeroArgOverloads[function]; found {
+			return f(t.Time)
+		}
+	case 1:
+		if f, found := timestampOneArgOverloads[function]; found {
+			return f(t.Time, args[0])
+		}
+	}
+	return NoSuchOverloadErr()
+}
+
+// Subtract implements traits.Subtractor.Subtract.
+func (t Timestamp) Subtract(subtrahend ref.Val) ref.Val {
+	switch subtrahend.Type() {
+	case DurationType:
+		dur := subtrahend.(Duration)
+		val, err := subtractTimeDurationChecked(t.Time, dur.Duration)
+		if err != nil {
+			return WrapErr(err)
+		}
+		return timestampOf(val)
+	case TimestampType:
+		t2 := subtrahend.(Timestamp).Time
+		val, err := subtractTimeChecked(t.Time, t2)
+		if err != nil {
+			return WrapErr(err)
+		}
+		return durationOf(val)
+	}
+	return MaybeNoSuchOverloadErr(subtrahend)
+}
+
+// Type implements ref.Val.Type.
+func (t Timestamp) Type() ref.Type {
+	return TimestampType
+}
+
+// Value implements ref.Val.Value.
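+//
+// The raw value is the wrapped time.Time, so callers can hand it directly to the
+// standard library.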
+func (t Timestamp) Value() any { + return t.Time +} + +var ( + timestampValueType = reflect.TypeOf(&tpb.Timestamp{}) + + timestampZeroArgOverloads = map[string]func(time.Time) ref.Val{ + overloads.TimeGetFullYear: timestampGetFullYear, + overloads.TimeGetMonth: timestampGetMonth, + overloads.TimeGetDayOfYear: timestampGetDayOfYear, + overloads.TimeGetDate: timestampGetDayOfMonthOneBased, + overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBased, + overloads.TimeGetDayOfWeek: timestampGetDayOfWeek, + overloads.TimeGetHours: timestampGetHours, + overloads.TimeGetMinutes: timestampGetMinutes, + overloads.TimeGetSeconds: timestampGetSeconds, + overloads.TimeGetMilliseconds: timestampGetMilliseconds} + + timestampOneArgOverloads = map[string]func(time.Time, ref.Val) ref.Val{ + overloads.TimeGetFullYear: timestampGetFullYearWithTz, + overloads.TimeGetMonth: timestampGetMonthWithTz, + overloads.TimeGetDayOfYear: timestampGetDayOfYearWithTz, + overloads.TimeGetDate: timestampGetDayOfMonthOneBasedWithTz, + overloads.TimeGetDayOfMonth: timestampGetDayOfMonthZeroBasedWithTz, + overloads.TimeGetDayOfWeek: timestampGetDayOfWeekWithTz, + overloads.TimeGetHours: timestampGetHoursWithTz, + overloads.TimeGetMinutes: timestampGetMinutesWithTz, + overloads.TimeGetSeconds: timestampGetSecondsWithTz, + overloads.TimeGetMilliseconds: timestampGetMillisecondsWithTz} +) + +type timestampVisitor func(time.Time) ref.Val + +func timestampGetFullYear(t time.Time) ref.Val { + return Int(t.Year()) +} +func timestampGetMonth(t time.Time) ref.Val { + // CEL spec indicates that the month should be 0-based, but the Time value + // for Month() is 1-based. + return Int(t.Month() - 1) +} +func timestampGetDayOfYear(t time.Time) ref.Val { + return Int(t.YearDay() - 1) +} +func timestampGetDayOfMonthZeroBased(t time.Time) ref.Val { + return Int(t.Day() - 1) +} +func timestampGetDayOfMonthOneBased(t time.Time) ref.Val { + return Int(t.Day()) +} +func timestampGetDayOfWeek(t time.Time) ref.Val { + return Int(t.Weekday()) +} +func timestampGetHours(t time.Time) ref.Val { + return Int(t.Hour()) +} +func timestampGetMinutes(t time.Time) ref.Val { + return Int(t.Minute()) +} +func timestampGetSeconds(t time.Time) ref.Val { + return Int(t.Second()) +} +func timestampGetMilliseconds(t time.Time) ref.Val { + return Int(t.Nanosecond() / 1000000) +} + +func timestampGetFullYearWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetFullYear)(t) +} +func timestampGetMonthWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetMonth)(t) +} +func timestampGetDayOfYearWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfYear)(t) +} +func timestampGetDayOfMonthZeroBasedWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfMonthZeroBased)(t) +} +func timestampGetDayOfMonthOneBasedWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfMonthOneBased)(t) +} +func timestampGetDayOfWeekWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetDayOfWeek)(t) +} +func timestampGetHoursWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetHours)(t) +} +func timestampGetMinutesWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetMinutes)(t) +} +func timestampGetSecondsWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, timestampGetSeconds)(t) +} +func timestampGetMillisecondsWithTz(t time.Time, tz ref.Val) ref.Val { + return timeZone(tz, 
timestampGetMilliseconds)(t) +} + +func timeZone(tz ref.Val, visitor timestampVisitor) timestampVisitor { + return func(t time.Time) ref.Val { + if StringType != tz.Type() { + return MaybeNoSuchOverloadErr(tz) + } + val := string(tz.(String)) + ind := strings.Index(val, ":") + if ind == -1 { + loc, err := time.LoadLocation(val) + if err != nil { + return WrapErr(err) + } + return visitor(t.In(loc)) + } + + // If the input is not the name of a timezone (for example, 'US/Central'), it should be a numerical offset from UTC + // in the format ^(+|-)(0[0-9]|1[0-4]):[0-5][0-9]$. The numerical input is parsed in terms of hours and minutes. + hr, err := strconv.Atoi(string(val[0:ind])) + if err != nil { + return WrapErr(err) + } + min, err := strconv.Atoi(string(val[ind+1:])) + if err != nil { + return WrapErr(err) + } + var offset int + if string(val[0]) == "-" { + offset = hr*60 - min + } else { + offset = hr*60 + min + } + secondsEastOfUTC := int((time.Duration(offset) * time.Minute).Seconds()) + timezone := time.FixedZone("", secondsEastOfUTC) + return visitor(t.In(timezone)) + } +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel new file mode 100644 index 00000000..b19eb830 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/BUILD.bazel @@ -0,0 +1,29 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "comparer.go", + "container.go", + "field_tester.go", + "indexer.go", + "iterator.go", + "lister.go", + "mapper.go", + "matcher.go", + "math.go", + "receiver.go", + "sizer.go", + "traits.go", + "zeroer.go", + ], + importpath = "github.com/google/cel-go/common/types/traits", + deps = [ + "//common/types/ref:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/common/types/traits/comparer.go b/vendor/github.com/google/cel-go/common/types/traits/comparer.go new file mode 100644 index 00000000..b531d9ae --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/comparer.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Comparer interface for ordering comparisons between values in order to +// support '<', '<=', '>=', '>' overloads. +type Comparer interface { + // Compare this value to the input other value, returning an Int: + // + // this < other -> Int(-1) + // this == other -> Int(0) + // this > other -> Int(1) + // + // If the comparison cannot be made or is not supported, an error should + // be returned. 
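+	//
+	// For example, Int(1).Compare(Int(2)) is expected to yield Int(-1) under this
+	// contract.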
+ Compare(other ref.Val) ref.Val +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/container.go b/vendor/github.com/google/cel-go/common/types/traits/container.go new file mode 100644 index 00000000..cf5c621a --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/container.go @@ -0,0 +1,23 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import "github.com/google/cel-go/common/types/ref" + +// Container interface which permits containment tests such as 'a in b'. +type Container interface { + // Contains returns true if the value exists within the object. + Contains(value ref.Val) ref.Val +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/field_tester.go b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go new file mode 100644 index 00000000..816a9565 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/field_tester.go @@ -0,0 +1,30 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// FieldTester indicates if a defined field on an object type is set to a +// non-default value. +// +// For use with the `has()` macro. +type FieldTester interface { + // IsSet returns true if the field is defined and set to a non-default + // value. The method will return false if defined and not set, and an error + // if the field is not defined. + IsSet(field ref.Val) ref.Val +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/indexer.go b/vendor/github.com/google/cel-go/common/types/traits/indexer.go new file mode 100644 index 00000000..662c6836 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/indexer.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Indexer permits random access of elements by index 'a[b()]'. +type Indexer interface { + // Get the value at the specified index or error. + Get(index ref.Val) ref.Val +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/iterator.go b/vendor/github.com/google/cel-go/common/types/traits/iterator.go new file mode 100644 index 00000000..42dd371a --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/iterator.go @@ -0,0 +1,36 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Iterable aggregate types permit traversal over their elements. +type Iterable interface { + // Iterator returns a new iterator view of the struct. + Iterator() Iterator +} + +// Iterator permits safe traversal over the contents of an aggregate type. +type Iterator interface { + ref.Val + + // HasNext returns true if there are unvisited elements in the Iterator. + HasNext() ref.Val + + // Next returns the next element. + Next() ref.Val +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/lister.go b/vendor/github.com/google/cel-go/common/types/traits/lister.go new file mode 100644 index 00000000..5cf2593f --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/lister.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import "github.com/google/cel-go/common/types/ref" + +// Lister interface which aggregates the traits of a list. +type Lister interface { + ref.Val + Adder + Container + Indexer + Iterable + Sizer +} + +// MutableLister interface which emits an immutable result after an intermediate computation. +type MutableLister interface { + Lister + ToImmutableList() Lister +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/mapper.go b/vendor/github.com/google/cel-go/common/types/traits/mapper.go new file mode 100644 index 00000000..2f7c919a --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/mapper.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Mapper interface which aggregates the traits of a map.
+type Mapper interface {
+	ref.Val
+	Container
+	Indexer
+	Iterable
+	Sizer
+
+	// Find returns a value, if one exists, for the input key.
+	//
+	// If the key is not found the function returns (nil, false).
+	// If the input key is not valid for the map, or is Err or Unknown the function returns
+	// (Unknown|Err, false).
+	Find(key ref.Val) (ref.Val, bool)
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/matcher.go b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
new file mode 100644
index 00000000..085dc94f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/matcher.go
@@ -0,0 +1,23 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Matcher interface for supporting 'matches()' overloads.
+type Matcher interface {
+	// Match returns true if the pattern matches the current value.
+	Match(pattern ref.Val) ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/math.go b/vendor/github.com/google/cel-go/common/types/traits/math.go
new file mode 100644
index 00000000..86d5b913
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/math.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+import "github.com/google/cel-go/common/types/ref"
+
+// Adder interface to support '+' operator overloads.
+type Adder interface {
+	// Add returns a combination of the current value and other value.
+	//
+	// If the other value is an unsupported type, an error is returned.
+	Add(other ref.Val) ref.Val
+}
+
+// Divider interface to support '/' operator overloads.
+type Divider interface {
+	// Divide returns the result of dividing the current value by the input
+	// denominator.
+	//
+	// A denominator value of zero results in an error.
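+	//
+	// For example, Int(6).Divide(Int(3)) yields Int(2), while Int(6).Divide(Int(0))
+	// yields a division-by-zero error.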
+ Divide(denominator ref.Val) ref.Val +} + +// Modder interface to support '%' operator overloads. +type Modder interface { + // Modulo returns the result of taking the modulus of the current value + // by the denominator. + // + // A denominator value of zero results in an error. + Modulo(denominator ref.Val) ref.Val +} + +// Multiplier interface to support '*' operator overloads. +type Multiplier interface { + // Multiply returns the result of multiplying the current and input value. + Multiply(other ref.Val) ref.Val +} + +// Negater interface to support unary '-' and '!' operator overloads. +type Negater interface { + // Negate returns the complement of the current value. + Negate() ref.Val +} + +// Subtractor interface to support binary '-' operator overloads. +type Subtractor interface { + // Subtract returns the result of subtracting the input from the current + // value. + Subtract(subtrahend ref.Val) ref.Val +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/receiver.go b/vendor/github.com/google/cel-go/common/types/traits/receiver.go new file mode 100644 index 00000000..8f41db45 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/receiver.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import "github.com/google/cel-go/common/types/ref" + +// Receiver interface for routing instance method calls within a value. +type Receiver interface { + // Receive accepts a function name, overload id, and arguments and returns + // a value. + Receive(function string, overload string, args []ref.Val) ref.Val +} diff --git a/vendor/github.com/google/cel-go/common/types/traits/sizer.go b/vendor/github.com/google/cel-go/common/types/traits/sizer.go new file mode 100644 index 00000000..b80d2513 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/traits/sizer.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package traits + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// Sizer interface for supporting 'size()' overloads. +type Sizer interface { + // Size returns the number of elements or length of the value. 
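+	//
+	// For example, String("héllo").Size() yields Int(5): string size is measured
+	// in code points rather than bytes.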
+	Size() ref.Val
+}
diff --git a/vendor/github.com/google/cel-go/common/types/traits/traits.go b/vendor/github.com/google/cel-go/common/types/traits/traits.go
new file mode 100644
index 00000000..6da3e6a3
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/traits.go
@@ -0,0 +1,64 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package traits defines interfaces that a type may implement to participate
+// in operator overloads and function dispatch.
+package traits

+const (
+	// AdderType types provide a '+' operator overload.
+	AdderType = 1 << iota
+
+	// ComparerType types support ordering comparisons '<', '<=', '>', '>='.
+	ComparerType
+
+	// ContainerType types support 'in' operations.
+	ContainerType
+
+	// DividerType types support '/' operations.
+	DividerType
+
+	// FieldTesterType types support the detection of field value presence.
+	FieldTesterType
+
+	// IndexerType types support index access with dynamic values.
+	IndexerType
+
+	// IterableType types can be iterated over in comprehensions.
+	IterableType
+
+	// IteratorType types support iterator semantics.
+	IteratorType
+
+	// MatcherType types support pattern matching via 'matches' method.
+	MatcherType
+
+	// ModderType types support modulus operations '%'.
+	ModderType
+
+	// MultiplierType types support '*' operations.
+	MultiplierType
+
+	// NegatorType types support either negation via '!' or '-'.
+	NegatorType
+
+	// ReceiverType types support dynamic dispatch to instance methods.
+	ReceiverType
+
+	// SizerType types support the size() method.
+	SizerType
+
+	// SubtractorType types support '-' operations.
+	SubtractorType
+)
diff --git a/vendor/github.com/google/cel-go/common/types/traits/zeroer.go b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
new file mode 100644
index 00000000..0b7c830a
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/traits/zeroer.go
@@ -0,0 +1,21 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package traits
+
+// Zeroer interface for testing whether a CEL value is a zero value for its type.
+type Zeroer interface {
+	// IsZeroValue indicates whether the object is the zero value for the type.
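+	//
+	// For example, String("").IsZeroValue() reports true.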
+	IsZeroValue() bool
+}
diff --git a/vendor/github.com/google/cel-go/common/types/types.go b/vendor/github.com/google/cel-go/common/types/types.go
new file mode 100644
index 00000000..6c3d5f71
--- /dev/null
+++ b/vendor/github.com/google/cel-go/common/types/types.go
@@ -0,0 +1,823 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	chkdecls "github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+)
+
+// Kind indicates a CEL type's kind which is used to differentiate quickly between simple
+// and complex types.
+type Kind uint
+
+const (
+	// UnspecifiedKind is returned when the type is nil or its kind is not specified.
+	UnspecifiedKind Kind = iota
+
+	// DynKind represents a dynamic type. This kind only exists at type-check time.
+	DynKind
+
+	// AnyKind represents a google.protobuf.Any type. This kind only exists at type-check time.
+	// Prefer DynKind to AnyKind as AnyKind has a specific meaning which is based on protobuf
+	// well-known types.
+	AnyKind
+
+	// BoolKind represents a boolean type.
+	BoolKind
+
+	// BytesKind represents a bytes type.
+	BytesKind
+
+	// DoubleKind represents a double type.
+	DoubleKind
+
+	// DurationKind represents a CEL duration type.
+	DurationKind
+
+	// ErrorKind represents a CEL error type.
+	ErrorKind
+
+	// IntKind represents an integer type.
+	IntKind
+
+	// ListKind represents a list type.
+	ListKind
+
+	// MapKind represents a map type.
+	MapKind
+
+	// NullTypeKind represents a null type.
+	NullTypeKind
+
+	// OpaqueKind represents an abstract type which has no accessible fields.
+	OpaqueKind
+
+	// StringKind represents a string type.
+	StringKind
+
+	// StructKind represents a structured object with typed fields.
+	StructKind
+
+	// TimestampKind represents a CEL time type.
+	TimestampKind
+
+	// TypeKind represents the CEL type.
+	TypeKind
+
+	// TypeParamKind represents a parameterized type whose type name will be resolved at type-check time, if possible.
+	TypeParamKind
+
+	// UintKind represents a uint type.
+	UintKind
+
+	// UnknownKind represents an unknown value type.
+	UnknownKind
+)
+
+var (
+	// AnyType represents the google.protobuf.Any type.
+	AnyType = &Type{
+		kind:            AnyKind,
+		runtimeTypeName: "google.protobuf.Any",
+		traitMask: traits.FieldTesterType |
+			traits.IndexerType,
+	}
+	// BoolType represents the bool type.
+	BoolType = &Type{
+		kind:            BoolKind,
+		runtimeTypeName: "bool",
+		traitMask: traits.ComparerType |
+			traits.NegatorType,
+	}
+	// BytesType represents the bytes type.
+	BytesType = &Type{
+		kind:            BytesKind,
+		runtimeTypeName: "bytes",
+		traitMask: traits.AdderType |
+			traits.ComparerType |
+			traits.SizerType,
+	}
+	// DoubleType represents the double type.
+ DoubleType = &Type{ + kind: DoubleKind, + runtimeTypeName: "double", + traitMask: traits.AdderType | + traits.ComparerType | + traits.DividerType | + traits.MultiplierType | + traits.NegatorType | + traits.SubtractorType, + } + // DurationType represents the CEL duration type. + DurationType = &Type{ + kind: DurationKind, + runtimeTypeName: "google.protobuf.Duration", + traitMask: traits.AdderType | + traits.ComparerType | + traits.NegatorType | + traits.ReceiverType | + traits.SubtractorType, + } + // DynType represents a dynamic CEL type whose type will be determined at runtime from context. + DynType = &Type{ + kind: DynKind, + runtimeTypeName: "dyn", + } + // ErrorType represents a CEL error value. + ErrorType = &Type{ + kind: ErrorKind, + runtimeTypeName: "error", + } + // IntType represents the int type. + IntType = &Type{ + kind: IntKind, + runtimeTypeName: "int", + traitMask: traits.AdderType | + traits.ComparerType | + traits.DividerType | + traits.ModderType | + traits.MultiplierType | + traits.NegatorType | + traits.SubtractorType, + } + // ListType represents the runtime list type. + ListType = NewListType(nil) + // MapType represents the runtime map type. + MapType = NewMapType(nil, nil) + // NullType represents the type of a null value. + NullType = &Type{ + kind: NullTypeKind, + runtimeTypeName: "null_type", + } + // StringType represents the string type. + StringType = &Type{ + kind: StringKind, + runtimeTypeName: "string", + traitMask: traits.AdderType | + traits.ComparerType | + traits.MatcherType | + traits.ReceiverType | + traits.SizerType, + } + // TimestampType represents the time type. + TimestampType = &Type{ + kind: TimestampKind, + runtimeTypeName: "google.protobuf.Timestamp", + traitMask: traits.AdderType | + traits.ComparerType | + traits.ReceiverType | + traits.SubtractorType, + } + // TypeType represents a CEL type + TypeType = &Type{ + kind: TypeKind, + runtimeTypeName: "type", + } + // UintType represents a uint type. + UintType = &Type{ + kind: UintKind, + runtimeTypeName: "uint", + traitMask: traits.AdderType | + traits.ComparerType | + traits.DividerType | + traits.ModderType | + traits.MultiplierType | + traits.SubtractorType, + } + // UnknownType represents an unknown value type. + UnknownType = &Type{ + kind: UnknownKind, + runtimeTypeName: "unknown", + } +) + +var _ ref.Type = &Type{} +var _ ref.Val = &Type{} + +// Type holds a reference to a runtime type with an optional type-checked set of type parameters. +type Type struct { + // kind indicates general category of the type. + kind Kind + + // parameters holds the optional type-checked set of type Parameters that are used during static analysis. + parameters []*Type + + // runtimeTypeName indicates the runtime type name of the type. + runtimeTypeName string + + // isAssignableType function determines whether one type is assignable to this type. + // A nil value for the isAssignableType function falls back to equality of kind, runtimeType, and parameters. + isAssignableType func(other *Type) bool + + // isAssignableRuntimeType function determines whether the runtime type (with erasure) is assignable to this type. + // A nil value for the isAssignableRuntimeType function falls back to the equality of the type or type name. + isAssignableRuntimeType func(other ref.Val) bool + + // traitMask is a mask of flags which indicate the capabilities of the type. + traitMask int +} + +// ConvertToNative implements ref.Val.ConvertToNative. 
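+//
+// Native conversion is intentionally unsupported for type values, so this always
+// returns an error.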
+func (t *Type) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion not supported for 'type'") +} + +// ConvertToType implements ref.Val.ConvertToType. +func (t *Type) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case TypeType: + return TypeType + case StringType: + return String(t.TypeName()) + } + return NewErr("type conversion error from '%s' to '%s'", TypeType, typeVal) +} + +// Equal indicates whether two types have the same runtime type name. +// +// The name Equal is a bit of a misnomer, but for historical reasons, this is the +// runtime behavior. For a more accurate definition see IsType(). +func (t *Type) Equal(other ref.Val) ref.Val { + otherType, ok := other.(ref.Type) + return Bool(ok && t.TypeName() == otherType.TypeName()) +} + +// HasTrait implements the ref.Type interface method. +func (t *Type) HasTrait(trait int) bool { + return trait&t.traitMask == trait +} + +// IsExactType indicates whether the two types are exactly the same. This check also verifies type parameter type names. +func (t *Type) IsExactType(other *Type) bool { + return t.isTypeInternal(other, true) +} + +// IsEquivalentType indicates whether two types are equivalent. This check ignores type parameter type names. +func (t *Type) IsEquivalentType(other *Type) bool { + return t.isTypeInternal(other, false) +} + +// Kind indicates general category of the type. +func (t *Type) Kind() Kind { + if t == nil { + return UnspecifiedKind + } + return t.kind +} + +// isTypeInternal checks whether the two types are equivalent or exactly the same based on the checkTypeParamName flag. +func (t *Type) isTypeInternal(other *Type, checkTypeParamName bool) bool { + if t == nil { + return false + } + if t == other { + return true + } + if t.Kind() != other.Kind() || len(t.Parameters()) != len(other.Parameters()) { + return false + } + if (checkTypeParamName || t.Kind() != TypeParamKind) && t.TypeName() != other.TypeName() { + return false + } + for i, p := range t.Parameters() { + if !p.isTypeInternal(other.Parameters()[i], checkTypeParamName) { + return false + } + } + return true +} + +// IsAssignableType determines whether the current type is type-check assignable from the input fromType. +func (t *Type) IsAssignableType(fromType *Type) bool { + if t == nil { + return false + } + if t.isAssignableType != nil { + return t.isAssignableType(fromType) + } + return t.defaultIsAssignableType(fromType) +} + +// IsAssignableRuntimeType determines whether the current type is runtime assignable from the input runtimeType. +// +// At runtime, parameterized types are erased and so a function which type-checks to support a map(string, string) +// will have a runtime assignable type of a map. +func (t *Type) IsAssignableRuntimeType(val ref.Val) bool { + if t == nil { + return false + } + if t.isAssignableRuntimeType != nil { + return t.isAssignableRuntimeType(val) + } + return t.defaultIsAssignableRuntimeType(val) +} + +// Parameters returns the list of type parameters if set. +// +// For ListKind, Parameters()[0] represents the list element type +// For MapKind, Parameters()[0] represents the map key type, and Parameters()[1] represents the map +// value type. +func (t *Type) Parameters() []*Type { + if t == nil { + return emptyParams + } + return t.parameters +} + +// DeclaredTypeName indicates the fully qualified and parameterized type-check type name. 
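+//
+// For example, the declared type name of the google.protobuf.BoolValue wrapper
+// type is "wrapper(bool)".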
+func (t *Type) DeclaredTypeName() string {
+	// if the type itself is neither null, nor dyn, but is assignable to null, then it's a wrapper type.
+	if t.Kind() != NullTypeKind && !t.isDyn() && t.IsAssignableType(NullType) {
+		return fmt.Sprintf("wrapper(%s)", t.TypeName())
+	}
+	return t.TypeName()
+}
+
+// Type implements the ref.Val interface method.
+func (t *Type) Type() ref.Type {
+	return TypeType
+}
+
+// Value implements the ref.Val interface method.
+func (t *Type) Value() any {
+	return t.TypeName()
+}
+
+// TypeName returns the type-erased fully qualified runtime type name.
+//
+// TypeName implements the ref.Type interface method.
+func (t *Type) TypeName() string {
+	if t == nil {
+		return ""
+	}
+	return t.runtimeTypeName
+}
+
+// WithTraits creates a copy of the current Type and sets the trait mask to the traits parameter.
+//
+// This method should be used with Opaque types where the type acts like a container, e.g. vector.
+func (t *Type) WithTraits(traits int) *Type {
+	if t == nil {
+		return nil
+	}
+	return &Type{
+		kind:                    t.kind,
+		parameters:              t.parameters,
+		runtimeTypeName:         t.runtimeTypeName,
+		isAssignableType:        t.isAssignableType,
+		isAssignableRuntimeType: t.isAssignableRuntimeType,
+		traitMask:               traits,
+	}
+}
+
+// String returns a human-readable definition of the type name.
+func (t *Type) String() string {
+	if len(t.Parameters()) == 0 {
+		return t.DeclaredTypeName()
+	}
+	params := make([]string, len(t.Parameters()))
+	for i, p := range t.Parameters() {
+		params[i] = p.String()
+	}
+	return fmt.Sprintf("%s(%s)", t.DeclaredTypeName(), strings.Join(params, ", "))
+}
+
+// isDyn indicates whether the type is dynamic in any way.
+func (t *Type) isDyn() bool {
+	k := t.Kind()
+	return k == DynKind || k == AnyKind || k == TypeParamKind
+}
+
+// defaultIsAssignableType provides the standard definition of what it means for one type to be assignable to another
+// where any of the following may return a true result:
+//   - The from type and the target type are the same instance.
+//   - The target type is dynamic.
+//   - The fromType has the same kind and type name as the target type, and all parameters of the
+//     target type are IsAssignableType() from the parameters of the fromType.
+func (t *Type) defaultIsAssignableType(fromType *Type) bool {
+	if t == fromType || t.isDyn() {
+		return true
+	}
+	if t.Kind() != fromType.Kind() ||
+		t.TypeName() != fromType.TypeName() ||
+		len(t.Parameters()) != len(fromType.Parameters()) {
+		return false
+	}
+	for i, tp := range t.Parameters() {
+		fp := fromType.Parameters()[i]
+		if !tp.IsAssignableType(fp) {
+			return false
+		}
+	}
+	return true
+}
+
+// defaultIsAssignableRuntimeType inspects the type and in the case of list and map elements, the key and element types
+// to determine whether a ref.Val is assignable to the declared type for a function signature.
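+//
+// Only the first element of a list (and a single key/value pair of a map) is
+// sampled, so this is a heuristic rather than a full scan of the collection.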
+func (t *Type) defaultIsAssignableRuntimeType(val ref.Val) bool {
+	valType := val.Type()
+	// If the current type and value type don't agree, then return false.
+	if !(t.isDyn() || t.TypeName() == valType.TypeName()) {
+		return false
+	}
+	switch t.Kind() {
+	case ListKind:
+		elemType := t.Parameters()[0]
+		l := val.(traits.Lister)
+		if l.Size() == IntZero {
+			return true
+		}
+		it := l.Iterator()
+		elemVal := it.Next()
+		return elemType.IsAssignableRuntimeType(elemVal)
+	case MapKind:
+		keyType := t.Parameters()[0]
+		elemType := t.Parameters()[1]
+		m := val.(traits.Mapper)
+		if m.Size() == IntZero {
+			return true
+		}
+		it := m.Iterator()
+		keyVal := it.Next()
+		elemVal := m.Get(keyVal)
+		return keyType.IsAssignableRuntimeType(keyVal) && elemType.IsAssignableRuntimeType(elemVal)
+	}
+	return true
+}
+
+// NewListType creates an instance of a list type value with the provided element type.
+func NewListType(elemType *Type) *Type {
+	return &Type{
+		kind:            ListKind,
+		parameters:      []*Type{elemType},
+		runtimeTypeName: "list",
+		traitMask: traits.AdderType |
+			traits.ContainerType |
+			traits.IndexerType |
+			traits.IterableType |
+			traits.SizerType,
+	}
+}
+
+// NewMapType creates an instance of a map type value with the provided key and value types.
+func NewMapType(keyType, valueType *Type) *Type {
+	return &Type{
+		kind:            MapKind,
+		parameters:      []*Type{keyType, valueType},
+		runtimeTypeName: "map",
+		traitMask: traits.ContainerType |
+			traits.IndexerType |
+			traits.IterableType |
+			traits.SizerType,
+	}
+}
+
+// NewNullableType creates an instance of a nullable type with the provided wrapped type.
+//
+// Note: only primitive types are supported as wrapped types.
+func NewNullableType(wrapped *Type) *Type {
+	return &Type{
+		kind:            wrapped.Kind(),
+		parameters:      wrapped.Parameters(),
+		runtimeTypeName: wrapped.TypeName(),
+		traitMask:       wrapped.traitMask,
+		isAssignableType: func(other *Type) bool {
+			return NullType.IsAssignableType(other) || wrapped.IsAssignableType(other)
+		},
+		isAssignableRuntimeType: func(other ref.Val) bool {
+			return NullType.IsAssignableRuntimeType(other) || wrapped.IsAssignableRuntimeType(other)
+		},
+	}
+}
+
+// NewOptionalType creates an abstract parameterized type instance corresponding to CEL's notion of optional.
+func NewOptionalType(param *Type) *Type {
+	return NewOpaqueType("optional_type", param)
+}
+
+// NewOpaqueType creates an abstract parameterized type with a given name.
+func NewOpaqueType(name string, params ...*Type) *Type {
+	return &Type{
+		kind:            OpaqueKind,
+		parameters:      params,
+		runtimeTypeName: name,
+	}
+}
+
+// NewObjectType creates a type reference to an externally defined type, e.g. a protobuf message type.
+//
+// An object type is assumed to support field presence testing and field indexing. Additionally, the
+// type may also indicate additional traits through the use of the optional traits vararg argument.
+func NewObjectType(typeName string, traits ...int) *Type {
+	// Function sanitizes object types on the fly
+	if wkt, found := checkedWellKnowns[typeName]; found {
+		return wkt
+	}
+	traitMask := 0
+	for _, trait := range traits {
+		traitMask |= trait
+	}
+	return &Type{
+		kind:            StructKind,
+		parameters:      emptyParams,
+		runtimeTypeName: typeName,
+		traitMask:       structTypeTraitMask | traitMask,
+	}
+}
+
+// NewObjectTypeValue creates a type reference to an externally defined type.
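+//
+// For example, NewObjectTypeValue("google.protobuf.Duration") resolves to the
+// well-known DurationType via NewObjectType.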
+// +// Deprecated: use cel.ObjectType(typeName) +func NewObjectTypeValue(typeName string) *Type { + return NewObjectType(typeName) +} + +// NewTypeValue creates an opaque type which has a set of optional type traits as defined in +// the common/types/traits package. +// +// Deprecated: use cel.ObjectType(typeName, traits) +func NewTypeValue(typeName string, traits ...int) *Type { + traitMask := 0 + for _, trait := range traits { + traitMask |= trait + } + return &Type{ + kind: StructKind, + parameters: emptyParams, + runtimeTypeName: typeName, + traitMask: traitMask, + } +} + +// NewTypeParamType creates a parameterized type instance. +func NewTypeParamType(paramName string) *Type { + return &Type{ + kind: TypeParamKind, + runtimeTypeName: paramName, + } +} + +// NewTypeTypeWithParam creates a type with a type parameter. +// Used for type-checking purposes, but equivalent to TypeType otherwise. +func NewTypeTypeWithParam(param *Type) *Type { + return &Type{ + kind: TypeKind, + runtimeTypeName: "type", + parameters: []*Type{param}, + } +} + +// TypeToExprType converts a CEL-native type representation to a protobuf CEL Type representation. +func TypeToExprType(t *Type) (*exprpb.Type, error) { + switch t.Kind() { + case AnyKind: + return chkdecls.Any, nil + case BoolKind: + return maybeWrapper(t, chkdecls.Bool), nil + case BytesKind: + return maybeWrapper(t, chkdecls.Bytes), nil + case DoubleKind: + return maybeWrapper(t, chkdecls.Double), nil + case DurationKind: + return chkdecls.Duration, nil + case DynKind: + return chkdecls.Dyn, nil + case ErrorKind: + return chkdecls.Error, nil + case IntKind: + return maybeWrapper(t, chkdecls.Int), nil + case ListKind: + if len(t.Parameters()) != 1 { + return nil, fmt.Errorf("invalid list, got %d parameters, wanted one", len(t.Parameters())) + } + et, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + return chkdecls.NewListType(et), nil + case MapKind: + if len(t.Parameters()) != 2 { + return nil, fmt.Errorf("invalid map, got %d parameters, wanted two", len(t.Parameters())) + } + kt, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + vt, err := TypeToExprType(t.Parameters()[1]) + if err != nil { + return nil, err + } + return chkdecls.NewMapType(kt, vt), nil + case NullTypeKind: + return chkdecls.Null, nil + case OpaqueKind: + params := make([]*exprpb.Type, len(t.Parameters())) + for i, p := range t.Parameters() { + pt, err := TypeToExprType(p) + if err != nil { + return nil, err + } + params[i] = pt + } + return chkdecls.NewAbstractType(t.TypeName(), params...), nil + case StringKind: + return maybeWrapper(t, chkdecls.String), nil + case StructKind: + return chkdecls.NewObjectType(t.TypeName()), nil + case TimestampKind: + return chkdecls.Timestamp, nil + case TypeParamKind: + return chkdecls.NewTypeParamType(t.TypeName()), nil + case TypeKind: + if len(t.Parameters()) == 1 { + p, err := TypeToExprType(t.Parameters()[0]) + if err != nil { + return nil, err + } + return chkdecls.NewTypeType(p), nil + } + return chkdecls.NewTypeType(nil), nil + case UintKind: + return maybeWrapper(t, chkdecls.Uint), nil + } + return nil, fmt.Errorf("missing type conversion to proto: %v", t) +} + +// ExprTypeToType converts a protobuf CEL type representation to a CEL-native type representation. 
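+//
+// For the types handled here it acts as the inverse of TypeToExprType above.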
+func ExprTypeToType(t *exprpb.Type) (*Type, error) { + switch t.GetTypeKind().(type) { + case *exprpb.Type_Dyn: + return DynType, nil + case *exprpb.Type_AbstractType_: + paramTypes := make([]*Type, len(t.GetAbstractType().GetParameterTypes())) + for i, p := range t.GetAbstractType().GetParameterTypes() { + pt, err := ExprTypeToType(p) + if err != nil { + return nil, err + } + paramTypes[i] = pt + } + return NewOpaqueType(t.GetAbstractType().GetName(), paramTypes...), nil + case *exprpb.Type_ListType_: + et, err := ExprTypeToType(t.GetListType().GetElemType()) + if err != nil { + return nil, err + } + return NewListType(et), nil + case *exprpb.Type_MapType_: + kt, err := ExprTypeToType(t.GetMapType().GetKeyType()) + if err != nil { + return nil, err + } + vt, err := ExprTypeToType(t.GetMapType().GetValueType()) + if err != nil { + return nil, err + } + return NewMapType(kt, vt), nil + case *exprpb.Type_MessageType: + return NewObjectType(t.GetMessageType()), nil + case *exprpb.Type_Null: + return NullType, nil + case *exprpb.Type_Primitive: + switch t.GetPrimitive() { + case exprpb.Type_BOOL: + return BoolType, nil + case exprpb.Type_BYTES: + return BytesType, nil + case exprpb.Type_DOUBLE: + return DoubleType, nil + case exprpb.Type_INT64: + return IntType, nil + case exprpb.Type_STRING: + return StringType, nil + case exprpb.Type_UINT64: + return UintType, nil + default: + return nil, fmt.Errorf("unsupported primitive type: %v", t) + } + case *exprpb.Type_TypeParam: + return NewTypeParamType(t.GetTypeParam()), nil + case *exprpb.Type_Type: + if t.GetType().GetTypeKind() != nil { + p, err := ExprTypeToType(t.GetType()) + if err != nil { + return nil, err + } + return NewTypeTypeWithParam(p), nil + } + return TypeType, nil + case *exprpb.Type_WellKnown: + switch t.GetWellKnown() { + case exprpb.Type_ANY: + return AnyType, nil + case exprpb.Type_DURATION: + return DurationType, nil + case exprpb.Type_TIMESTAMP: + return TimestampType, nil + default: + return nil, fmt.Errorf("unsupported well-known type: %v", t) + } + case *exprpb.Type_Wrapper: + t, err := ExprTypeToType(&exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: t.GetWrapper()}}) + if err != nil { + return nil, err + } + return NewNullableType(t), nil + case *exprpb.Type_Error: + return ErrorType, nil + default: + return nil, fmt.Errorf("unsupported type: %v", t) + } +} + +func maybeWrapper(t *Type, pbType *exprpb.Type) *exprpb.Type { + if t.IsAssignableType(NullType) { + return chkdecls.NewWrapperType(pbType) + } + return pbType +} + +func maybeForeignType(t ref.Type) *Type { + if celType, ok := t.(*Type); ok { + return celType + } + // Inspect the incoming type to determine its traits. The assumption will be that the incoming + // type does not have any field values; however, if the trait mask indicates that field testing + // and indexing are supported, the foreign type is marked as a struct. + traitMask := 0 + for _, trait := range allTraits { + if t.HasTrait(trait) { + traitMask |= trait + } + } + // Treat the value like a struct. If it has no fields, this is harmless to denote the type + // as such since it basically becomes an opaque type by convention. + return NewObjectType(t.TypeName(), traitMask) +} + +var ( + checkedWellKnowns = map[string]*Type{ + // Wrapper types. 
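+		// Each wrapper is modeled as a nullable primitive so that null is assignable to it.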
+ "google.protobuf.BoolValue": NewNullableType(BoolType), + "google.protobuf.BytesValue": NewNullableType(BytesType), + "google.protobuf.DoubleValue": NewNullableType(DoubleType), + "google.protobuf.FloatValue": NewNullableType(DoubleType), + "google.protobuf.Int64Value": NewNullableType(IntType), + "google.protobuf.Int32Value": NewNullableType(IntType), + "google.protobuf.UInt64Value": NewNullableType(UintType), + "google.protobuf.UInt32Value": NewNullableType(UintType), + "google.protobuf.StringValue": NewNullableType(StringType), + // Well-known types. + "google.protobuf.Any": AnyType, + "google.protobuf.Duration": DurationType, + "google.protobuf.Timestamp": TimestampType, + // Json types. + "google.protobuf.ListValue": NewListType(DynType), + "google.protobuf.NullValue": NullType, + "google.protobuf.Struct": NewMapType(StringType, DynType), + "google.protobuf.Value": DynType, + } + + emptyParams = []*Type{} + + allTraits = []int{ + traits.AdderType, + traits.ComparerType, + traits.ContainerType, + traits.DividerType, + traits.FieldTesterType, + traits.IndexerType, + traits.IterableType, + traits.IteratorType, + traits.MatcherType, + traits.ModderType, + traits.MultiplierType, + traits.NegatorType, + traits.ReceiverType, + traits.SizerType, + traits.SubtractorType, + } + + structTypeTraitMask = traits.FieldTesterType | traits.IndexerType +) diff --git a/vendor/github.com/google/cel-go/common/types/uint.go b/vendor/github.com/google/cel-go/common/types/uint.go new file mode 100644 index 00000000..6d74f30d --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/uint.go @@ -0,0 +1,256 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "math" + "reflect" + "strconv" + + "github.com/google/cel-go/common/types/ref" + + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +// Uint type implementation which supports comparison and math operators. +type Uint uint64 + +var ( + uint32WrapperType = reflect.TypeOf(&wrapperspb.UInt32Value{}) + + uint64WrapperType = reflect.TypeOf(&wrapperspb.UInt64Value{}) +) + +// Uint constants +const ( + uintZero = Uint(0) +) + +// Add implements traits.Adder.Add. +func (i Uint) Add(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := addUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(val) +} + +// Compare implements traits.Comparer.Compare. 
+func (i Uint) Compare(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return NewErr("NaN values cannot be ordered") + } + return compareUintDouble(i, ov) + case Int: + return compareUintInt(i, ov) + case Uint: + return compareUint(i, ov) + default: + return MaybeNoSuchOverloadErr(other) + } +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (i Uint) ConvertToNative(typeDesc reflect.Type) (any, error) { + switch typeDesc.Kind() { + case reflect.Uint, reflect.Uint32: + v, err := uint64ToUint32Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint8: + v, err := uint64ToUint8Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint16: + v, err := uint64ToUint16Checked(uint64(i)) + if err != nil { + return 0, err + } + return reflect.ValueOf(v).Convert(typeDesc).Interface(), nil + case reflect.Uint64: + return reflect.ValueOf(i).Convert(typeDesc).Interface(), nil + case reflect.Ptr: + switch typeDesc { + case anyValueType: + // Primitives must be wrapped before being set on an Any field. + return anypb.New(wrapperspb.UInt64(uint64(i))) + case jsonValueType: + // JSON can accurately represent 32-bit uints as floating point values. + if i.isJSONSafe() { + return structpb.NewNumberValue(float64(i)), nil + } + // Proto3 to JSON conversion requires string-formatted uint64 values + // since the conversion to floating point would result in truncation. + return structpb.NewStringValue(strconv.FormatUint(uint64(i), 10)), nil + case uint32WrapperType: + // Convert the value to a wrapperspb.UInt32Value, error on overflow. + v, err := uint64ToUint32Checked(uint64(i)) + if err != nil { + return 0, err + } + return wrapperspb.UInt32(v), nil + case uint64WrapperType: + // Convert the value to a wrapperspb.UInt64Value. + return wrapperspb.UInt64(uint64(i)), nil + } + switch typeDesc.Elem().Kind() { + case reflect.Uint32: + v, err := uint64ToUint32Checked(uint64(i)) + if err != nil { + return 0, err + } + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + case reflect.Uint64: + v := uint64(i) + p := reflect.New(typeDesc.Elem()) + p.Elem().Set(reflect.ValueOf(v).Convert(typeDesc.Elem())) + return p.Interface(), nil + } + case reflect.Interface: + iv := i.Value() + if reflect.TypeOf(iv).Implements(typeDesc) { + return iv, nil + } + if reflect.TypeOf(i).Implements(typeDesc) { + return i, nil + } + } + return nil, fmt.Errorf("unsupported type conversion from 'uint' to %v", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (i Uint) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case IntType: + v, err := uint64ToInt64Checked(uint64(i)) + if err != nil { + return WrapErr(err) + } + return Int(v) + case UintType: + return i + case DoubleType: + return Double(i) + case StringType: + return String(fmt.Sprintf("%d", uint64(i))) + case TypeType: + return UintType + } + return NewErr("type conversion error from '%s' to '%s'", UintType, typeVal) +} + +// Divide implements traits.Divider.Divide. 
+func (i Uint) Divide(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + div, err := divideUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(div) +} + +// Equal implements ref.Val.Equal. +func (i Uint) Equal(other ref.Val) ref.Val { + switch ov := other.(type) { + case Double: + if math.IsNaN(float64(ov)) { + return False + } + return Bool(compareUintDouble(i, ov) == 0) + case Int: + return Bool(compareUintInt(i, ov) == 0) + case Uint: + return Bool(i == ov) + default: + return False + } +} + +// IsZeroValue returns true if the uint is zero. +func (i Uint) IsZeroValue() bool { + return i == 0 +} + +// Modulo implements traits.Modder.Modulo. +func (i Uint) Modulo(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + mod, err := moduloUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(mod) +} + +// Multiply implements traits.Multiplier.Multiply. +func (i Uint) Multiply(other ref.Val) ref.Val { + otherUint, ok := other.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(other) + } + val, err := multiplyUint64Checked(uint64(i), uint64(otherUint)) + if err != nil { + return WrapErr(err) + } + return Uint(val) +} + +// Subtract implements traits.Subtractor.Subtract. +func (i Uint) Subtract(subtrahend ref.Val) ref.Val { + subtraUint, ok := subtrahend.(Uint) + if !ok { + return MaybeNoSuchOverloadErr(subtrahend) + } + val, err := subtractUint64Checked(uint64(i), uint64(subtraUint)) + if err != nil { + return WrapErr(err) + } + return Uint(val) +} + +// Type implements ref.Val.Type. +func (i Uint) Type() ref.Type { + return UintType +} + +// Value implements ref.Val.Value. +func (i Uint) Value() any { + return uint64(i) +} + +// isJSONSafe indicates whether the uint is safely representable as a floating point value in JSON. +func (i Uint) isJSONSafe() bool { + return i <= maxIntJSON +} diff --git a/vendor/github.com/google/cel-go/common/types/unknown.go b/vendor/github.com/google/cel-go/common/types/unknown.go new file mode 100644 index 00000000..9dd2b257 --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/unknown.go @@ -0,0 +1,326 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "math" + "reflect" + "sort" + "strings" + "unicode" + + "github.com/google/cel-go/common/types/ref" +) + +var ( + unspecifiedAttribute = &AttributeTrail{qualifierPath: []any{}} +) + +// NewAttributeTrail creates a new simple attribute from a variable name. +func NewAttributeTrail(variable string) *AttributeTrail { + if variable == "" { + return unspecifiedAttribute + } + return &AttributeTrail{variable: variable} +} + +// AttributeTrail specifies a variable with an optional qualifier path. 
An attribute value is expected to
+// correspond to an AbsoluteAttribute, meaning a field selection which starts with a top-level variable.
+//
+// The qualifier path elements adhere to the AttributeQualifier type constraint.
+type AttributeTrail struct {
+	variable      string
+	qualifierPath []any
+}
+
+// Equal returns whether two attribute values have the same variable name and qualifier paths.
+func (a *AttributeTrail) Equal(other *AttributeTrail) bool {
+	if a.Variable() != other.Variable() || len(a.QualifierPath()) != len(other.QualifierPath()) {
+		return false
+	}
+	for i, q := range a.QualifierPath() {
+		qual := other.QualifierPath()[i]
+		if !qualifiersEqual(q, qual) {
+			return false
+		}
+	}
+	return true
+}
+
+func qualifiersEqual(a, b any) bool {
+	if a == b {
+		return true
+	}
+	switch numA := a.(type) {
+	case int64:
+		numB, ok := b.(uint64)
+		if !ok {
+			return false
+		}
+		return intUintEqual(numA, numB)
+	case uint64:
+		numB, ok := b.(int64)
+		if !ok {
+			return false
+		}
+		return intUintEqual(numB, numA)
+	default:
+		return false
+	}
+}
+
+func intUintEqual(i int64, u uint64) bool {
+	if i < 0 || u > math.MaxInt64 {
+		return false
+	}
+	return i == int64(u)
+}
+
+// Variable returns the variable name associated with the attribute.
+func (a *AttributeTrail) Variable() string {
+	return a.variable
+}
+
+// QualifierPath returns the optional set of qualifying fields or indices applied to the variable.
+func (a *AttributeTrail) QualifierPath() []any {
+	return a.qualifierPath
+}
+
+// String returns the string representation of the Attribute.
+func (a *AttributeTrail) String() string {
+	if a.variable == "" {
+		return "<unspecified>"
+	}
+	var str strings.Builder
+	str.WriteString(a.variable)
+	for _, q := range a.qualifierPath {
+		switch q := q.(type) {
+		case bool, int64:
+			str.WriteString(fmt.Sprintf("[%v]", q))
+		case uint64:
+			str.WriteString(fmt.Sprintf("[%vu]", q))
+		case string:
+			if isIdentifierCharacter(q) {
+				str.WriteString(fmt.Sprintf(".%v", q))
+			} else {
+				str.WriteString(fmt.Sprintf("[%q]", q))
+			}
+		}
+	}
+	return str.String()
+}
+
+func isIdentifierCharacter(str string) bool {
+	for _, c := range str {
+		if unicode.IsLetter(c) || unicode.IsDigit(c) || string(c) == "_" {
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// AttributeQualifier constrains the possible types which may be used to qualify an attribute.
+type AttributeQualifier interface {
+	bool | int64 | uint64 | string
+}
+
+// QualifyAttribute qualifies an attribute using a valid AttributeQualifier type.
+func QualifyAttribute[T AttributeQualifier](attr *AttributeTrail, qualifier T) *AttributeTrail {
+	attr.qualifierPath = append(attr.qualifierPath, qualifier)
+	return attr
+}
+
+// Unknown type which collects expression ids which caused the current value to become unknown.
+type Unknown struct {
+	attributeTrails map[int64][]*AttributeTrail
+}
+
+// NewUnknown creates a new unknown at a given expression id for an attribute.
+//
+// If the attribute is nil, the attribute value will be the `unspecifiedAttribute`.
+func NewUnknown(id int64, attr *AttributeTrail) *Unknown {
+	if attr == nil {
+		attr = unspecifiedAttribute
+	}
+	return &Unknown{
+		attributeTrails: map[int64][]*AttributeTrail{id: {attr}},
+	}
+}
+
+// IDs returns the set of unknown expression ids contained by this value.
+//
+// Numeric identifiers are guaranteed to be in sorted order.
+func (u *Unknown) IDs() []int64 { + ids := make(int64Slice, len(u.attributeTrails)) + i := 0 + for id := range u.attributeTrails { + ids[i] = id + i++ + } + ids.Sort() + return ids +} + +// GetAttributeTrails returns the attribute trails, if present, missing for a given expression id. +func (u *Unknown) GetAttributeTrails(id int64) ([]*AttributeTrail, bool) { + trails, found := u.attributeTrails[id] + return trails, found +} + +// Contains returns true if the input unknown is a subset of the current unknown. +func (u *Unknown) Contains(other *Unknown) bool { + for id, otherTrails := range other.attributeTrails { + trails, found := u.attributeTrails[id] + if !found || len(otherTrails) != len(trails) { + return false + } + for _, ot := range otherTrails { + found := false + for _, t := range trails { + if t.Equal(ot) { + found = true + break + } + } + if !found { + return false + } + } + } + return true +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (u *Unknown) ConvertToNative(typeDesc reflect.Type) (any, error) { + return u.Value(), nil +} + +// ConvertToType is an identity function since unknown values cannot be modified. +func (u *Unknown) ConvertToType(typeVal ref.Type) ref.Val { + return u +} + +// Equal is an identity function since unknown values cannot be modified. +func (u *Unknown) Equal(other ref.Val) ref.Val { + return u +} + +// String implements the Stringer interface +func (u *Unknown) String() string { + var str strings.Builder + for id, attrs := range u.attributeTrails { + if str.Len() != 0 { + str.WriteString(", ") + } + if len(attrs) == 1 { + str.WriteString(fmt.Sprintf("%v (%d)", attrs[0], id)) + } else { + str.WriteString(fmt.Sprintf("%v (%d)", attrs, id)) + } + } + return str.String() +} + +// Type implements ref.Val.Type. +func (u *Unknown) Type() ref.Type { + return UnknownType +} + +// Value implements ref.Val.Value. +func (u *Unknown) Value() any { + return u +} + +// IsUnknown returns whether the element ref.Val is in instance of *types.Unknown +func IsUnknown(val ref.Val) bool { + switch val.(type) { + case *Unknown: + return true + default: + return false + } +} + +// MaybeMergeUnknowns determines whether an input value and another, possibly nil, unknown will produce +// an unknown result. +// +// If the input `val` is another Unknown, then the result will be the merge of the `val` and the input +// `unk`. If the `val` is not unknown, then the result will depend on whether the input `unk` is nil. +// If both values are non-nil and unknown, then the return value will be a merge of both unknowns. +func MaybeMergeUnknowns(val ref.Val, unk *Unknown) (*Unknown, bool) { + src, isUnk := val.(*Unknown) + if !isUnk { + if unk != nil { + return unk, true + } + return unk, false + } + return MergeUnknowns(src, unk), true +} + +// MergeUnknowns combines two unknown values into a new unknown value. 
+func MergeUnknowns(unk1, unk2 *Unknown) *Unknown { + if unk1 == nil { + return unk2 + } + if unk2 == nil { + return unk1 + } + out := &Unknown{ + attributeTrails: make(map[int64][]*AttributeTrail, len(unk1.attributeTrails)+len(unk2.attributeTrails)), + } + for id, ats := range unk1.attributeTrails { + out.attributeTrails[id] = ats + } + for id, ats := range unk2.attributeTrails { + existing, found := out.attributeTrails[id] + if !found { + out.attributeTrails[id] = ats + continue + } + + for _, at := range ats { + found := false + for _, et := range existing { + if at.Equal(et) { + found = true + break + } + } + if !found { + existing = append(existing, at) + } + } + out.attributeTrails[id] = existing + } + return out +} + +// int64Slice is an implementation of the sort.Interface +type int64Slice []int64 + +// Len returns the number of elements in the slice. +func (x int64Slice) Len() int { return len(x) } + +// Less indicates whether the value at index i is less than the value at index j. +func (x int64Slice) Less(i, j int) bool { return x[i] < x[j] } + +// Swap swaps the values at indices i and j in place. +func (x int64Slice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +// Sort is a convenience method: x.Sort() calls Sort(x). +func (x int64Slice) Sort() { sort.Sort(x) } diff --git a/vendor/github.com/google/cel-go/common/types/util.go b/vendor/github.com/google/cel-go/common/types/util.go new file mode 100644 index 00000000..71662eee --- /dev/null +++ b/vendor/github.com/google/cel-go/common/types/util.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/google/cel-go/common/types/ref" +) + +// IsUnknownOrError returns whether the input element ref.Val is an ErrType or UnknownType. +func IsUnknownOrError(val ref.Val) bool { + switch val.(type) { + case *Unknown, *Err: + return true + } + return false +} + +// IsPrimitiveType returns whether the input element ref.Val is a primitive type. +// Note, primitive types do not include well-known types such as Duration and Timestamp. +func IsPrimitiveType(val ref.Val) bool { + switch val.Type() { + case BoolType, BytesType, DoubleType, IntType, StringType, UintType: + return true + } + return false +} + +// Equal returns whether the two ref.Value are heterogeneously equivalent. 
+func Equal(lhs ref.Val, rhs ref.Val) ref.Val {
+	lNull := lhs == NullValue
+	rNull := rhs == NullValue
+	if lNull || rNull {
+		return Bool(lNull == rNull)
+	}
+	return lhs.Equal(rhs)
+}
diff --git a/vendor/github.com/google/cel-go/ext/BUILD.bazel b/vendor/github.com/google/cel-go/ext/BUILD.bazel
new file mode 100644
index 00000000..db223da2
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/BUILD.bazel
@@ -0,0 +1,70 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(
+    licenses = ["notice"],  # Apache 2.0
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "bindings.go",
+        "encoders.go",
+        "formatting.go",
+        "guards.go",
+        "lists.go",
+        "math.go",
+        "native.go",
+        "protos.go",
+        "sets.go",
+        "strings.go",
+    ],
+    importpath = "github.com/google/cel-go/ext",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//cel:go_default_library",
+        "//checker:go_default_library",
+        "//common/ast:go_default_library",
+        "//common/overloads:go_default_library",
+        "//common/operators:go_default_library",
+        "//common/types:go_default_library",
+        "//common/types/pb:go_default_library",
+        "//common/types/ref:go_default_library",
+        "//common/types/traits:go_default_library",
+        "//interpreter:go_default_library",
+        "@org_golang_google_protobuf//proto:go_default_library",
+        "@org_golang_google_protobuf//reflect/protoreflect:go_default_library",
+        "@org_golang_google_protobuf//types/known/structpb",
+        "@org_golang_x_text//language:go_default_library",
+        "@org_golang_x_text//message:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "encoders_test.go",
+        "lists_test.go",
+        "math_test.go",
+        "native_test.go",
+        "protos_test.go",
+        "sets_test.go",
+        "strings_test.go",
+    ],
+    embed = [
+        ":go_default_library",
+    ],
+    deps = [
+        "//cel:go_default_library",
+        "//checker:go_default_library",
+        "//common/types:go_default_library",
+        "//common/types/ref:go_default_library",
+        "//common/types/traits:go_default_library",
+        "//test:go_default_library",
+        "//test/proto2pb:go_default_library",
+        "//test/proto3pb:go_default_library",
+        "@org_golang_google_protobuf//proto:go_default_library",
+        "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library",
+        "@org_golang_google_protobuf//encoding/protojson:go_default_library",
+    ],
+)
diff --git a/vendor/github.com/google/cel-go/ext/README.md b/vendor/github.com/google/cel-go/ext/README.md
new file mode 100644
index 00000000..a55f56de
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/README.md
@@ -0,0 +1,669 @@
+# Extensions
+
+CEL extensions are a related set of constants, functions, macros, or other
+features which may not be covered by the core CEL spec.
+
+## Bindings
+
+Returns a cel.EnvOption to configure support for local variable bindings
+in expressions.
+
+# Cel.Bind
+
+Binds a simple identifier to an initialization expression which may be used
+in a subsequent result expression. Bindings may also be nested within each
+other.
+
+    cel.bind(<varName>, <initExpr>, <resultExpr>)
+
+Examples:
+
+    cel.bind(a, 'hello',
+        cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+
+    // Avoid a list allocation within the exists comprehension.
+    cel.bind(valid_values, [a, b, c],
+        [d, e, f].exists(elem, elem in valid_values))
+
+Local bindings are not guaranteed to be evaluated before use.
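To orient reviewers on the newly vendored extension library, here is a minimal, self-contained sketch (not part of this patch) of how a consumer enables and evaluates `cel.bind`; the variable `name` and the expression are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/ext"
)

func main() {
	// Enable the bindings extension and declare an input variable.
	env, err := cel.NewEnv(
		ext.Bindings(),
		cel.Variable("name", cel.StringType),
	)
	if err != nil {
		panic(err)
	}
	// Bind the greeting once, then reuse it in the result expression.
	ast, iss := env.Compile(`cel.bind(greeting, 'hello ' + name, greeting + ', ' + greeting)`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"name": "world"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // hello world, hello world
}
```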
+
+## Encoders
+
+Encoding utilities for marshalling data into standardized representations.
+
+### Base64.Decode
+
+Decodes a base64-encoded string to bytes.
+
+This function will return an error if the string input is not
+base64-encoded.
+
+    base64.decode(<string>) -> <bytes>
+
+Examples:
+
+    base64.decode('aGVsbG8=') // return b'hello'
+    base64.decode('aGVsbG8') // error
+
+### Base64.Encode
+
+Encodes bytes to a base64-encoded string.
+
+    base64.encode(<bytes>) -> <string>
+
+Example:
+
+    base64.encode(b'hello') // return 'aGVsbG8='
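The encoder functions follow the same compile-and-evaluate pattern as the bindings sketch above; a short sketch (same assumptions) that round-trips a value through both directions:

```go
// Sketch: round-trip a value through base64.encode/base64.decode.
env, err := cel.NewEnv(ext.Encoders())
if err != nil {
	panic(err)
}
ast, iss := env.Compile(`base64.decode(base64.encode(b'hello')) == b'hello'`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(ast)
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(cel.NoVars())
fmt.Println(out, err) // true <nil>
```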
+
+## Math
+
+Math helper macros and functions.
+
+Note, all macros use the 'math' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'math', the macro will likely work just as
+intended; however, there is some chance for collision.
+
+### Math.Greatest
+
+Returns the greatest valued number present in the arguments to the macro.
+
+Greatest is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+    math.greatest(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+    math.greatest(1) // 1
+    math.greatest(1u, 2u) // 2u
+    math.greatest(-42.0, -21.5, -100.0) // -21.5
+    math.greatest([-42.0, -21.5, -100.0]) // -21.5
+    math.greatest(numbers) // numbers must be list(numeric)
+
+    math.greatest() // parse error
+    math.greatest('string') // parse error
+    math.greatest(a, b) // check-time error if a or b is non-numeric
+    math.greatest(dyn('string')) // runtime error
+
+### Math.Least
+
+Returns the least valued number present in the arguments to the macro.
+
+Least is a variable argument count macro which must take at least one
+argument. Simple numeric and list literals are supported as valid argument
+types; however, other literals will be flagged as errors during macro
+expansion. If the argument expression does not resolve to a numeric or
+list(numeric) type during type-checking, or during runtime then an error
+will be produced. If a list argument is empty, this too will produce an
+error.
+
+    math.least(<arg>, ...) -> <double|int|uint>
+
+Examples:
+
+    math.least(1) // 1
+    math.least(1u, 2u) // 1u
+    math.least(-42.0, -21.5, -100.0) // -100.0
+    math.least([-42.0, -21.5, -100.0]) // -100.0
+    math.least(numbers) // numbers must be list(numeric)
+
+    math.least() // parse error
+    math.least('string') // parse error
+    math.least(a, b) // check-time error if a or b is non-numeric
+    math.least(dyn('string')) // runtime error
+
+### Math.BitOr
+
+Introduced at version: 1
+
+Performs a bitwise-OR operation over two int or uint values.
+
+    math.bitOr(<int>, <int>) -> <int>
+    math.bitOr(<uint>, <uint>) -> <uint>
+
+Examples:
+
+    math.bitOr(1u, 2u) // returns 3u
+    math.bitOr(-2, -4) // returns -2
+
+### Math.BitAnd
+
+Introduced at version: 1
+
+Performs a bitwise-AND operation over two int or uint values.
+
+    math.bitAnd(<int>, <int>) -> <int>
+    math.bitAnd(<uint>, <uint>) -> <uint>
+
+Examples:
+
+    math.bitAnd(3u, 2u) // return 2u
+    math.bitAnd(3, 5) // returns 3
+    math.bitAnd(-3, -5) // returns -7
+
+### Math.BitXor
+
+Introduced at version: 1
+
+    math.bitXor(<int>, <int>) -> <int>
+    math.bitXor(<uint>, <uint>) -> <uint>
+
+Performs a bitwise-XOR operation over two int or uint values.
+
+Examples:
+
+    math.bitXor(3u, 5u) // returns 6u
+    math.bitXor(1, 3) // returns 2
+
+### Math.BitNot
+
+Introduced at version: 1
+
+Function which accepts a single int or uint and performs a bitwise-NOT
+ones-complement of the given binary value.
+
+    math.bitNot(<int>) -> <int>
+    math.bitNot(<uint>) -> <uint>
+
+Examples
+
+    math.bitNot(1) // returns -1
+    math.bitNot(-1) // return 0
+    math.bitNot(0u) // returns 18446744073709551615u
+
+### Math.BitShiftLeft
+
+Introduced at version: 1
+
+Perform a left shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned
+since the number of bits shifted is greater than or equal to the total bit
+length of the number being shifted. Negative valued bit shifts will result
+in a runtime error.
+
+    math.bitShiftLeft(<int>, <int>) -> <int>
+    math.bitShiftLeft(<uint>, <int>) -> <uint>
+
+Examples
+
+    math.bitShiftLeft(1, 2) // returns 4
+    math.bitShiftLeft(-1, 2) // returns -4
+    math.bitShiftLeft(1u, 2) // return 4u
+    math.bitShiftLeft(1u, 200) // returns 0u
+
+### Math.BitShiftRight
+
+Introduced at version: 1
+
+Perform a right shift of bits on the first parameter, by the amount of bits
+specified in the second parameter. The first parameter is either a uint or
+an int. The second parameter must be an int.
+
+When the second parameter is 64 or greater, 0 will always be returned since
+the number of bits shifted is greater than or equal to the total bit length
+of the number being shifted. Negative valued bit shifts will result in a
+runtime error.
+
+The sign bit extension will not be preserved for this operation: vacant bits
+on the left are filled with 0.
+
+    math.bitShiftRight(<int>, <int>) -> <int>
+    math.bitShiftRight(<uint>, <int>) -> <uint>
+
+Examples
+
+    math.bitShiftRight(1024, 2) // returns 256
+    math.bitShiftRight(1024u, 2) // returns 256u
+    math.bitShiftRight(1024u, 64) // returns 0u
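A quick sketch of the math helpers in use (same compile/eval assumptions as above); the macros expand at parse time and then evaluate like ordinary functions:

```go
// Sketch: math macros and the version-1 bit functions.
env, err := cel.NewEnv(ext.Math())
if err != nil {
	panic(err)
}
ast, iss := env.Compile(`math.greatest(1, 2, 3) == 3 && math.bitShiftLeft(1, 3) == 8`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(ast)
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(cel.NoVars())
fmt.Println(out, err) // true <nil>
```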
+
+### Math.Ceil
+
+Introduced at version: 1
+
+Compute the ceiling of a double value.
+
+    math.ceil(<double>) -> <double>
+
+Examples:
+
+    math.ceil(1.2) // returns 2.0
+    math.ceil(-1.2) // returns -1.0
+
+### Math.Floor
+
+Introduced at version: 1
+
+Compute the floor of a double value.
+
+    math.floor(<double>) -> <double>
+
+Examples:
+
+    math.floor(1.2) // returns 1.0
+    math.floor(-1.2) // returns -2.0
+
+### Math.Round
+
+Introduced at version: 1
+
+Rounds the double value to the nearest whole number with ties rounding away
+from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0.
+
+    math.round(<double>) -> <double>
+
+Examples:
+
+    math.round(1.2) // returns 1.0
+    math.round(1.5) // returns 2.0
+    math.round(-1.5) // returns -2.0
+
+### Math.Trunc
+
+Introduced at version: 1
+
+Truncates the fractional portion of the double value.
+
+    math.trunc(<double>) -> <double>
+
+Examples:
+
+    math.trunc(-1.3) // returns -1.0
+    math.trunc(1.3) // returns 1.0
+
+### Math.Abs
+
+Introduced at version: 1
+
+Returns the absolute value of the numeric type provided as input. If the
+value is NaN, the output is NaN. If the input is int64 min, the function
+will result in an overflow error.
+
+    math.abs(<double>) -> <double>
+    math.abs(<int>) -> <int>
+    math.abs(<uint>) -> <uint>
+
+Examples:
+
+    math.abs(-1) // returns 1
+    math.abs(1) // returns 1
+    math.abs(-9223372036854775808) // overflow error
+
+### Math.Sign
+
+Introduced at version: 1
+
+Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or
+uint depending on the overload. For floating point values, if NaN is
+provided as input, the output is also NaN. The implementation does not
+differentiate between positive and negative zero.
+
+    math.sign(<double>) -> <double>
+    math.sign(<int>) -> <int>
+    math.sign(<uint>) -> <uint>
+
+Examples:
+
+    math.sign(-42) // returns -1
+    math.sign(0) // returns 0
+    math.sign(42) // returns 1
+
+### Math.IsInf
+
+Introduced at version: 1
+
+Returns true if the input double value is -Inf or +Inf.
+
+    math.isInf(<double>) -> <bool>
+
+Examples:
+
+    math.isInf(1.0/0.0) // returns true
+    math.isInf(1.2) // returns false
+
+### Math.IsNaN
+
+Introduced at version: 1
+
+Returns true if the input double value is NaN, false otherwise.
+
+    math.isNaN(<double>) -> <bool>
+
+Examples:
+
+    math.isNaN(0.0/0.0) // returns true
+    math.isNaN(1.2) // returns false
+
+### Math.IsFinite
+
+Introduced at version: 1
+
+Returns true if the value is a finite number. Equivalent in behavior to:
+!math.isNaN(double) && !math.isInf(double)
+
+    math.isFinite(<double>) -> <bool>
+
+Examples:
+
+    math.isFinite(0.0/0.0) // returns false
+    math.isFinite(1.2) // returns true
+
+## Protos
+
+Protos configure extended macros and functions for proto manipulation.
+
+Note, all macros use the 'proto' namespace; however, at the time of macro
+expansion the namespace looks just like any other identifier. If you are
+currently using a variable named 'proto', the macro will likely work just as
+you intend; however, there is some chance for collision.
+
+### Protos.GetExt
+
+Macro which generates a select expression that retrieves an extension field
+from the input proto2 syntax message. If the field is not set, the default
+value for the extension field is returned according to safe-traversal semantics.
+
+    proto.getExt(<msg>, <fully.qualified.extension.name>) -> <field-type>
+
+Example:
+
+    proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+
+### Protos.HasExt
+
+Macro which generates a test-only select expression that determines whether
+an extension field is set on a proto2 syntax message.
+
+    proto.hasExt(<msg>, <fully.qualified.extension.name>) -> <bool>
+
+Example:
+
+    proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
+
+## Lists
+
+Extended functions for list manipulation. As a general note, all indices are
+zero-based.
+
+### Slice
+
+Returns a new sub-list using the indexes provided.
+
+    <list>.slice(<int>, <int>) -> <list>
+
+Examples:
+
+    [1,2,3,4].slice(1, 3) // return [2, 3]
+    [1,2,3,4].slice(2, 4) // return [3, 4]
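A one-line sketch of the list extension (same assumptions as the earlier snippets); slice uses a zero-based, half-open index range:

```go
// Sketch: slice returns elements at indices [1, 3).
env, err := cel.NewEnv(ext.Lists())
if err != nil {
	panic(err)
}
ast, iss := env.Compile(`[1, 2, 3, 4].slice(1, 3) == [2, 3]`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(ast)
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(cel.NoVars())
fmt.Println(out, err) // true <nil>
```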
+
+## Sets
+
+Sets provides set relationship tests.
+
+There is no set type within CEL, and while one may be introduced in the
+future, there are cases where a `list` type is known to behave like a set.
+For such cases, this library provides some basic functionality for
+determining set containment, equivalence, and intersection.
+
+### Sets.Contains
+
+Returns whether the first list argument contains all elements in the second
+list argument. The list may contain elements of any type and standard CEL
+equality is used to determine whether a value exists in both lists. If the
+second list is empty, the result will always return true.
+
+    sets.contains(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.contains([], []) // true
+    sets.contains([], [1]) // false
+    sets.contains([1, 2, 3, 4], [2, 3]) // true
+    sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true
+
+### Sets.Equivalent
+
+Returns whether the first and second list are set equivalent. Lists are set
+equivalent if for every item in the first list, there is an element in the
+second which is equal. The lists may not be of the same size as they do not
+guarantee the elements within them are unique, so size does not factor into
+the computation.
+
+    sets.equivalent(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.equivalent([], []) // true
+    sets.equivalent([1], [1, 1]) // true
+    sets.equivalent([1], [1u, 1.0]) // true
+    sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true
+
+### Sets.Intersects
+
+Returns whether the first list has at least one element whose value is equal
+to an element in the second list. If either list is empty, the result will
+be false.
+
+    sets.intersects(list(T), list(T)) -> bool
+
+Examples:
+
+    sets.intersects([1], []) // false
+    sets.intersects([1], [1, 2]) // true
+    sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true
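The set predicates compose with ordinary boolean operators; a short sketch under the same assumptions:

```go
// Sketch: containment and intersection over list-as-set values.
env, err := cel.NewEnv(ext.Sets())
if err != nil {
	panic(err)
}
ast, iss := env.Compile(`sets.contains([1, 2, 3, 4], [2, 3]) && sets.intersects([1], [1, 2])`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(ast)
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(cel.NoVars())
fmt.Println(out, err) // true <nil>
```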
+
+## Strings
+
+Extended functions for string manipulation. As a general note, all indices are
+zero-based.
+
+### CharAt
+
+Returns the character at the given position. If the position is negative, or
+greater than the length of the string, the function will produce an error:
+
+    <string>.charAt(<int>) -> <string>
+
+Examples:
+
+    'hello'.charAt(4) // return 'o'
+    'hello'.charAt(5) // return ''
+    'hello'.charAt(-1) // error
+
+### IndexOf
+
+Returns the integer index of the first occurrence of the search string. If the
+search string is not found the function returns -1.
+
+The function also accepts an optional position from which to begin the
+substring search. If the substring is the empty string, the index where the
+search starts is returned (zero or custom).
+
+    <string>.indexOf(<string>) -> <int>
+    <string>.indexOf(<string>, <int>) -> <int>
+
+Examples:
+
+    'hello mellow'.indexOf('') // returns 0
+    'hello mellow'.indexOf('ello') // returns 1
+    'hello mellow'.indexOf('jello') // returns -1
+    'hello mellow'.indexOf('', 2) // returns 2
+    'hello mellow'.indexOf('ello', 2) // returns 7
+    'hello mellow'.indexOf('ello', 20) // error
+
+### Join
+
+Returns a new string where the elements of string list are concatenated.
+
+The function also accepts an optional separator which is placed between
+elements in the resulting string.
+
+    <list<string>>.join() -> <string>
+    <list<string>>.join(<string>) -> <string>
+
+Examples:
+
+    ['hello', 'mellow'].join() // returns 'hellomellow'
+    ['hello', 'mellow'].join(' ') // returns 'hello mellow'
+    [].join() // returns ''
+    [].join('/') // returns ''
+
+### LastIndexOf
+
+Returns the integer index of the last occurrence of the search string. If the
+search string is not found the function returns -1.
+
+The function also accepts an optional position which represents the last index
+to be considered as the beginning of the substring match. If the substring is
+the empty string, the index where the search starts is returned (string length
+or custom).
+
+    <string>.lastIndexOf(<string>) -> <int>
+    <string>.lastIndexOf(<string>, <int>) -> <int>
+
+Examples:
+
+    'hello mellow'.lastIndexOf('') // returns 12
+    'hello mellow'.lastIndexOf('ello') // returns 7
+    'hello mellow'.lastIndexOf('jello') // returns -1
+    'hello mellow'.lastIndexOf('ello', 6) // returns 1
+    'hello mellow'.lastIndexOf('ello', -1) // error
+
+### LowerAscii
+
+Returns a new string where all ASCII characters are lower-cased.
+
+This function does not perform Unicode case-mapping for characters outside the
+ASCII range.
+
+    <string>.lowerAscii() -> <string>
+
+Examples:
+
+    'TacoCat'.lowerAscii() // returns 'tacocat'
+    'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+
+### Quote
+
+**Introduced in version 1**
+
+Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+
+    strings.quote(<string>)
+
+Examples:
+
+    strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+    strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
+
+### Replace
+
+Returns a new string based on the target, which replaces the occurrences of a
+search string with a replacement string if present. The function accepts an
+optional limit on the number of substring replacements to be made.
+
+When the replacement limit is 0, the result is the original string. When the
+limit is a negative number, the function behaves the same as replace all.
+
+    <string>.replace(<string>, <string>) -> <string>
+    <string>.replace(<string>, <string>, <int>) -> <string>
+
+Examples:
+
+    'hello hello'.replace('he', 'we') // returns 'wello wello'
+    'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
+    'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
+    'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
+
+### Split
+
+Returns a list of strings split from the input by the given separator. The
+function accepts an optional argument specifying a limit on the number of
+substrings produced by the split.
+
+When the split limit is 0, the result is an empty list. When the limit is 1,
+the result is the target string to split. When the limit is a negative
+number, the function behaves the same as split all.
+
+    <string>.split(<string>) -> <list<string>>
+    <string>.split(<string>, <int>) -> <list<string>>
+
+Examples:
+
+    'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
+    'hello hello hello'.split(' ', 0) // returns []
+    'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
+    'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
+    'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
+
+### Substring
+
+Returns the substring given a numeric range corresponding to character
+positions. Optionally may omit the trailing range for a substring from a given
+character position until the end of a string.
+
+Character offsets are 0-based with an inclusive start range and exclusive end
+range. It is an error to specify an end range that is lower than the start
+range, or for either the start or end index to be negative or exceed the string
+length.
+
+    <string>.substring(<int>) -> <string>
+    <string>.substring(<int>, <int>) -> <string>
+
+Examples:
+
+    'tacocat'.substring(4) // returns 'cat'
+    'tacocat'.substring(0, 4) // returns 'taco'
+    'tacocat'.substring(-1) // error
+    'tacocat'.substring(2, 1) // error
+
+### Trim
+
+Returns a new string which removes the leading and trailing whitespace in the
+target string. The trim function uses the Unicode definition of whitespace
+which does not include the zero-width spaces. See:
+https://en.wikipedia.org/wiki/Whitespace_character#Unicode
+
+    <string>.trim() -> <string>
+
+Examples:
+
+    ' \ttrim\n '.trim() // returns 'trim'
+
+### UpperAscii
+
+Returns a new string where all ASCII characters are upper-cased.
+
+This function does not perform Unicode case-mapping for characters outside the
+ASCII range.
+
+    <string>.upperAscii() -> <string>
+
+Examples:
+
+    'TacoCat'.upperAscii() // returns 'TACOCAT'
+    'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
+
+### Reverse
+
+Returns a new string whose characters are the same as the target string, only
+formatted in reverse order. This function relies on converting strings to rune
+arrays in order to reverse, and is available in version 3 of the strings
+extension.
+
+    <string>.reverse() -> <string>
+
+Examples:
+
+    'gums'.reverse() // returns 'smug'
+    'John Smith'.reverse() // returns 'htimS nhoJ'
\ No newline at end of file
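A short sketch of the string helpers under the same assumptions as the earlier snippets:

```go
// Sketch: indexOf and join from the strings extension.
env, err := cel.NewEnv(ext.Strings())
if err != nil {
	panic(err)
}
ast, iss := env.Compile(`'hello mellow'.indexOf('ello') == 1 && ['hello', 'mellow'].join(' ') == 'hello mellow'`)
if iss.Err() != nil {
	panic(iss.Err())
}
prg, err := env.Program(ast)
if err != nil {
	panic(err)
}
out, _, err := prg.Eval(cel.NoVars())
fmt.Println(out, err) // true <nil>
```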
diff --git a/vendor/github.com/google/cel-go/ext/bindings.go b/vendor/github.com/google/cel-go/ext/bindings.go
new file mode 100644
index 00000000..2c6cc627
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/bindings.go
@@ -0,0 +1,96 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+)
+
+// Bindings returns a cel.EnvOption to configure support for local variable
+// bindings in expressions.
+//
+// # Cel.Bind
+//
+// Binds a simple identifier to an initialization expression which may be used
+// in a subsequent result expression. Bindings may also be nested within each
+// other.
+//
+//	cel.bind(<varName>, <initExpr>, <resultExpr>)
+//
+// Examples:
+//
+//	cel.bind(a, 'hello',
+//	cel.bind(b, 'world', a + b + b + a)) // "helloworldworldhello"
+//
+//	// Avoid a list allocation within the exists comprehension.
+//	cel.bind(valid_values, [a, b, c],
+//	[d, e, f].exists(elem, elem in valid_values))
+//
+// Local bindings are not guaranteed to be evaluated before use.
+func Bindings() cel.EnvOption {
+	return cel.Lib(celBindings{})
+}
+
+const (
+	celNamespace  = "cel"
+	bindMacro     = "bind"
+	unusedIterVar = "#unused"
+)
+
+type celBindings struct{}
+
+func (celBindings) LibraryName() string {
+	return "cel.lib.ext.cel.bindings"
+}
+
+func (celBindings) CompileOptions() []cel.EnvOption {
+	return []cel.EnvOption{
+		cel.Macros(
+			// cel.bind(var, <init>, <expr>)
+			cel.ReceiverMacro(bindMacro, 3, celBind),
+		),
+	}
+}
+
+func (celBindings) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func celBind(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
+	if !macroTargetMatchesNamespace(celNamespace, target) {
+		return nil, nil
+	}
+	varIdent := args[0]
+	varName := ""
+	switch varIdent.Kind() {
+	case ast.IdentKind:
+		varName = varIdent.AsIdent()
+	default:
+		return nil, mef.NewError(varIdent.ID(), "cel.bind() variable names must be simple identifiers")
+	}
+	varInit := args[1]
+	resultExpr := args[2]
+	return mef.NewComprehension(
+		mef.NewList(),
+		unusedIterVar,
+		varName,
+		varInit,
+		mef.NewLiteral(types.False),
+		mef.NewIdent(varName),
+		resultExpr,
+	), nil
+}
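bindings.go above implements `cel.bind` by rewriting the call into a single-iteration comprehension over an empty list. One way to observe the rewrite is to unparse a parsed expression; a sketch, assuming that without macro-call tracking `cel.AstToString` prints the expanded comprehension rather than the original `cel.bind(...)` call:

```go
// Sketch: inspect the comprehension that cel.bind expands into.
env, err := cel.NewEnv(ext.Bindings())
if err != nil {
	panic(err)
}
ast, iss := env.Parse(`cel.bind(x, 40 + 2, x * x)`)
if iss.Err() != nil {
	panic(iss.Err())
}
expanded, err := cel.AstToString(ast)
if err != nil {
	panic(err)
}
fmt.Println(expanded)
```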
diff --git a/vendor/github.com/google/cel-go/ext/encoders.go b/vendor/github.com/google/cel-go/ext/encoders.go
new file mode 100644
index 00000000..61ac0b77
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/encoders.go
@@ -0,0 +1,87 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"encoding/base64"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Encoders returns a cel.EnvOption to configure extended functions for string, byte, and object
+// encodings.
+//
+// # Base64.Decode
+//
+// Decodes a base64-encoded string to bytes.
+//
+// This function will return an error if the string input is not base64-encoded.
+//
+//	base64.decode(<string>) -> <bytes>
+//
+// Examples:
+//
+//	base64.decode('aGVsbG8=') // return b'hello'
+//	base64.decode('aGVsbG8') // error
+//
+// # Base64.Encode
+//
+// Encodes bytes to a base64-encoded string.
+//
+//	base64.encode(<bytes>) -> <string>
+//
+// Examples:
+//
+//	base64.encode(b'hello') // return 'aGVsbG8='
func Encoders() cel.EnvOption {
+	return cel.Lib(encoderLib{})
+}
+
+type encoderLib struct{}
+
+func (encoderLib) LibraryName() string {
+	return "cel.lib.ext.encoders"
+}
+
+func (encoderLib) CompileOptions() []cel.EnvOption {
+	return []cel.EnvOption{
+		cel.Function("base64.decode",
+			cel.Overload("base64_decode_string", []*cel.Type{cel.StringType}, cel.BytesType,
+				cel.UnaryBinding(func(str ref.Val) ref.Val {
+					s := str.(types.String)
+					return bytesOrError(base64DecodeString(string(s)))
+				}))),
+		cel.Function("base64.encode",
+			cel.Overload("base64_encode_bytes", []*cel.Type{cel.BytesType}, cel.StringType,
+				cel.UnaryBinding(func(bytes ref.Val) ref.Val {
+					b := bytes.(types.Bytes)
+					return stringOrError(base64EncodeBytes([]byte(b)))
+				}))),
+	}
+}
+
+func (encoderLib) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func base64DecodeString(str string) ([]byte, error) {
+	return base64.StdEncoding.DecodeString(str)
+}
+
+func base64EncodeBytes(bytes []byte) (string, error) {
+	return base64.StdEncoding.EncodeToString(bytes), nil
+}
diff --git a/vendor/github.com/google/cel-go/ext/formatting.go b/vendor/github.com/google/cel-go/ext/formatting.go
new file mode 100644
index 00000000..dbff613b
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/formatting.go
@@ -0,0 +1,904 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package ext + +import ( + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "unicode" + + "golang.org/x/text/language" + "golang.org/x/text/message" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +type clauseImpl func(ref.Val, string) (string, error) + +func clauseForType(argType ref.Type) (clauseImpl, error) { + switch argType { + case types.IntType, types.UintType: + return formatDecimal, nil + case types.StringType, types.BytesType, types.BoolType, types.NullType, types.TypeType: + return FormatString, nil + case types.TimestampType, types.DurationType: + // special case to ensure timestamps/durations get printed as CEL literals + return func(arg ref.Val, locale string) (string, error) { + argStrVal := arg.ConvertToType(types.StringType) + argStr := argStrVal.Value().(string) + if arg.Type() == types.TimestampType { + return fmt.Sprintf("timestamp(%q)", argStr), nil + } + if arg.Type() == types.DurationType { + return fmt.Sprintf("duration(%q)", argStr), nil + } + return "", fmt.Errorf("cannot convert argument of type %s to timestamp/duration", arg.Type().TypeName()) + }, nil + case types.ListType: + return formatList, nil + case types.MapType: + return formatMap, nil + case types.DoubleType: + // avoid formatFixed so we can output a period as the decimal separator in order + // to always be a valid CEL literal + return func(arg ref.Val, locale string) (string, error) { + argDouble, ok := arg.Value().(float64) + if !ok { + return "", fmt.Errorf("couldn't convert %s to float64", arg.Type().TypeName()) + } + fmtStr := fmt.Sprintf("%%.%df", defaultPrecision) + return fmt.Sprintf(fmtStr, argDouble), nil + }, nil + case types.TypeType: + return func(arg ref.Val, locale string) (string, error) { + return fmt.Sprintf("type(%s)", arg.Value().(string)), nil + }, nil + default: + return nil, fmt.Errorf("no formatting function for %s", argType.TypeName()) + } +} + +func formatList(arg ref.Val, locale string) (string, error) { + argList := arg.(traits.Lister) + argIterator := argList.Iterator() + var listStrBuilder strings.Builder + _, err := listStrBuilder.WriteRune('[') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + for argIterator.HasNext() == types.True { + member := argIterator.Next() + memberFormat, err := clauseForType(member.Type()) + if err != nil { + return "", err + } + unquotedStr, err := memberFormat(member, locale) + if err != nil { + return "", err + } + str := quoteForCEL(member, unquotedStr) + _, err = listStrBuilder.WriteString(str) + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + if argIterator.HasNext() == types.True { + _, err = listStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + } + } + _, err = listStrBuilder.WriteRune(']') + if err != nil { + return "", fmt.Errorf("error writing to list string: %w", err) + } + return listStrBuilder.String(), nil +} + +func formatMap(arg ref.Val, locale string) (string, error) { + argMap := arg.(traits.Mapper) + argIterator := argMap.Iterator() + type mapPair struct { + key string + value string + } + argPairs := make([]mapPair, argMap.Size().Value().(int64)) + i := 0 + for argIterator.HasNext() == types.True { + key := argIterator.Next() + var keyFormat clauseImpl + 
switch key.Type() { + case types.StringType, types.BoolType: + keyFormat = FormatString + case types.IntType, types.UintType: + keyFormat = formatDecimal + default: + return "", fmt.Errorf("no formatting function for map key of type %s", key.Type().TypeName()) + } + unquotedKeyStr, err := keyFormat(key, locale) + if err != nil { + return "", err + } + keyStr := quoteForCEL(key, unquotedKeyStr) + value, found := argMap.Find(key) + if !found { + return "", fmt.Errorf("could not find key: %q", key) + } + valueFormat, err := clauseForType(value.Type()) + if err != nil { + return "", err + } + unquotedValueStr, err := valueFormat(value, locale) + if err != nil { + return "", err + } + valueStr := quoteForCEL(value, unquotedValueStr) + argPairs[i] = mapPair{keyStr, valueStr} + i++ + } + sort.SliceStable(argPairs, func(x, y int) bool { + return argPairs[x].key < argPairs[y].key + }) + var mapStrBuilder strings.Builder + _, err := mapStrBuilder.WriteRune('{') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + for i, entry := range argPairs { + _, err = mapStrBuilder.WriteString(fmt.Sprintf("%s:%s", entry.key, entry.value)) + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + if i < len(argPairs)-1 { + _, err = mapStrBuilder.WriteString(", ") + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + } + } + _, err = mapStrBuilder.WriteRune('}') + if err != nil { + return "", fmt.Errorf("error writing to map string: %w", err) + } + return mapStrBuilder.String(), nil +} + +// quoteForCEL takes a formatted, unquoted value and quotes it in a manner suitable +// for embedding directly in CEL. +func quoteForCEL(refVal ref.Val, unquotedValue string) string { + switch refVal.Type() { + case types.StringType: + return fmt.Sprintf("%q", unquotedValue) + case types.BytesType: + return fmt.Sprintf("b%q", unquotedValue) + case types.DoubleType: + // special case to handle infinity/NaN + num := refVal.Value().(float64) + if math.IsInf(num, 1) || math.IsInf(num, -1) || math.IsNaN(num) { + return fmt.Sprintf("%q", unquotedValue) + } + return unquotedValue + default: + return unquotedValue + } +} + +// FormatString returns the string representation of a CEL value. +// +// It is used to implement the %s specifier in the (string).format() extension function. 
+func FormatString(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.ListType: + return formatList(arg, locale) + case types.MapType: + return formatMap(arg, locale) + case types.IntType, types.UintType, types.DoubleType, + types.BoolType, types.StringType, types.TimestampType, types.BytesType, types.DurationType, types.TypeType: + argStrVal := arg.ConvertToType(types.StringType) + argStr, ok := argStrVal.Value().(string) + if !ok { + return "", fmt.Errorf("could not convert argument %q to string", argStrVal) + } + return argStr, nil + case types.NullType: + return "null", nil + default: + return "", stringFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func formatDecimal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt, ok := arg.ConvertToType(types.IntType).Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + case types.UintType: + argInt, ok := arg.ConvertToType(types.UintType).Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf("%d", argInt), nil + default: + return "", decimalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func matchLanguage(locale string) (language.Tag, error) { + matcher, err := makeMatcher(locale) + if err != nil { + return language.Und, err + } + tag, _ := language.MatchStrings(matcher, locale) + return tag, nil +} + +func makeMatcher(locale string) (language.Matcher, error) { + tags := make([]language.Tag, 0) + tag, err := language.Parse(locale) + if err != nil { + return nil, err + } + tags = append(tags, tag) + return language.NewMatcher(tags), nil +} + +type stringFormatter struct{} + +func (c *stringFormatter) String(arg ref.Val, locale string) (string, error) { + return FormatString(arg, locale) +} + +func (c *stringFormatter) Decimal(arg ref.Val, locale string) (string, error) { + return formatDecimal(arg, locale) +} + +func (c *stringFormatter) Fixed(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + return "", fixedPointFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to float64", argFloatVal.Value()) + } + fmtStr := fmt.Sprintf("%%.%df", *precision) + + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Scientific(precision *int) func(ref.Val, string) (string, error) { + if precision == nil { + precision = new(int) + *precision = defaultPrecision + } + return func(arg ref.Val, locale string) (string, error) { + strException := false + if arg.Type() == types.StringType { + argStr := arg.Value().(string) + if argStr == "NaN" || argStr == "Infinity" || argStr == "-Infinity" { + strException = true + } + } + if arg.Type() != types.DoubleType && !strException { + 
return "", scientificFormatError(runtimeID, arg.Type().TypeName()) + } + argFloatVal := arg.ConvertToType(types.DoubleType) + argFloat, ok := argFloatVal.Value().(float64) + if !ok { + return "", fmt.Errorf("could not convert \"%v\" to float64", argFloatVal.Value()) + } + matchedLocale, err := matchLanguage(locale) + if err != nil { + return "", fmt.Errorf("error matching locale: %w", err) + } + fmtStr := fmt.Sprintf("%%%de", *precision) + return message.NewPrinter(matchedLocale).Sprintf(fmtStr, argFloat), nil + } +} + +func (c *stringFormatter) Binary(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + // locale is intentionally unused as integers formatted as binary + // strings are locale-independent + return fmt.Sprintf("%b", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%b", argInt), nil + case types.BoolType: + argBool := arg.Value().(bool) + if argBool { + return "1", nil + } + return "0", nil + default: + return "", binaryFormatError(runtimeID, arg.Type().TypeName()) + } +} + +func (c *stringFormatter) Hex(useUpper bool) func(ref.Val, string) (string, error) { + return func(arg ref.Val, locale string) (string, error) { + fmtStr := "%x" + if useUpper { + fmtStr = "%X" + } + switch arg.Type() { + case types.StringType, types.BytesType: + if arg.Type() == types.BytesType { + return fmt.Sprintf(fmtStr, arg.Value().([]byte)), nil + } + return fmt.Sprintf(fmtStr, arg.Value().(string)), nil + case types.IntType: + argInt, ok := arg.Value().(int64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to int64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + case types.UintType: + argInt, ok := arg.Value().(uint64) + if !ok { + return "", fmt.Errorf("could not convert \"%s\" to uint64", arg.Value()) + } + return fmt.Sprintf(fmtStr, argInt), nil + default: + return "", hexFormatError(runtimeID, arg.Type().TypeName()) + } + } +} + +func (c *stringFormatter) Octal(arg ref.Val, locale string) (string, error) { + switch arg.Type() { + case types.IntType: + argInt := arg.Value().(int64) + return fmt.Sprintf("%o", argInt), nil + case types.UintType: + argInt := arg.Value().(uint64) + return fmt.Sprintf("%o", argInt), nil + default: + return "", octalFormatError(runtimeID, arg.Type().TypeName()) + } +} + +// stringFormatValidator implements the cel.ASTValidator interface allowing for static validation +// of string.format calls. +type stringFormatValidator struct{} + +// Name returns the name of the validator. +func (stringFormatValidator) Name() string { + return "cel.lib.ext.validate.functions.string.format" +} + +// Configure implements the ASTValidatorConfigurer interface and augments the list of functions to skip +// during homogeneous aggregate literal type-checks. +func (stringFormatValidator) Configure(config cel.MutableValidatorConfig) error { + functions := config.GetOrDefault(cel.HomogeneousAggregateLiteralExemptFunctions, []string{}).([]string) + functions = append(functions, "format") + return config.Set(cel.HomogeneousAggregateLiteralExemptFunctions, functions) +} + +// Validate parses all literal format strings and type checks the format clause against the argument +// at the corresponding ordinal within the list literal argument to the function, if one is specified. 
+// Validate parses all literal format strings and type checks the format clause against the argument
+// at the corresponding ordinal within the list literal argument to the function, if one is specified.
+func (stringFormatValidator) Validate(env *cel.Env, _ cel.ValidatorConfig, a *ast.AST, iss *cel.Issues) {
+	root := ast.NavigateAST(a)
+	formatCallExprs := ast.MatchDescendants(root, matchConstantFormatStringWithListLiteralArgs(a))
+	for _, e := range formatCallExprs {
+		call := e.AsCall()
+		formatStr := call.Target().AsLiteral().Value().(string)
+		args := call.Args()[0].AsList().Elements()
+		formatCheck := &stringFormatChecker{
+			args: args,
+			ast:  a,
+		}
+		// use a placeholder locale, since locale doesn't affect syntax
+		_, err := parseFormatString(formatStr, formatCheck, formatCheck, "en_US")
+		if err != nil {
+			iss.ReportErrorAtID(getErrorExprID(e.ID(), err), err.Error())
+			continue
+		}
+		seenArgs := formatCheck.argsRequested
+		if len(args) > seenArgs {
+			iss.ReportErrorAtID(e.ID(),
+				"too many arguments supplied to string.format (expected %d, got %d)", seenArgs, len(args))
+		}
+	}
+}
+
+// getErrorExprID determines which list literal argument triggered a type-disagreement for the
+// purposes of more accurate error message reports.
+func getErrorExprID(id int64, err error) int64 {
+	fmtErr, ok := err.(formatError)
+	if ok {
+		return fmtErr.id
+	}
+	wrapped := errors.Unwrap(err)
+	if wrapped != nil {
+		return getErrorExprID(id, wrapped)
+	}
+	return id
+}
+
+// matchConstantFormatStringWithListLiteralArgs matches all valid expression nodes for string
+// format checking.
+func matchConstantFormatStringWithListLiteralArgs(a *ast.AST) ast.ExprMatcher {
+	return func(e ast.NavigableExpr) bool {
+		if e.Kind() != ast.CallKind {
+			return false
+		}
+		call := e.AsCall()
+		if !call.IsMemberFunction() || call.FunctionName() != "format" {
+			return false
+		}
+		overloadIDs := a.GetOverloadIDs(e.ID())
+		if len(overloadIDs) != 0 {
+			found := false
+			for _, overload := range overloadIDs {
+				if overload == overloads.ExtFormatString {
+					found = true
+					break
+				}
+			}
+			if !found {
+				return false
+			}
+		}
+		formatString := call.Target()
+		if formatString.Kind() != ast.LiteralKind || formatString.AsLiteral().Type() != cel.StringType {
+			return false
+		}
+		args := call.Args()
+		if len(args) != 1 {
+			return false
+		}
+		formatArgs := args[0]
+		return formatArgs.Kind() == ast.ListKind
+	}
+}
+
+// stringFormatChecker implements the formatStringInterpolator interface
+type stringFormatChecker struct {
+	args          []ast.Expr
+	argsRequested int
+	currArgIndex  int64
+	ast           *ast.AST
+}
+
+func (c *stringFormatChecker) String(arg ref.Val, locale string) (string, error) {
+	formatArg := c.args[c.currArgIndex]
+	valid, badID := c.verifyString(formatArg)
+	if !valid {
+		return "", stringFormatError(badID, c.typeOf(badID).TypeName())
+	}
+	return "", nil
+}
+
+func (c *stringFormatChecker) Decimal(arg ref.Val, locale string) (string, error) {
+	id := c.args[c.currArgIndex].ID()
+	valid := c.verifyTypeOneOf(id, types.IntType, types.UintType)
+	if !valid {
+		return "", decimalFormatError(id, c.typeOf(id).TypeName())
+	}
+	return "", nil
+}
+
+func (c *stringFormatChecker) Fixed(precision *int) func(ref.Val, string) (string, error) {
+	return func(arg ref.Val, locale string) (string, error) {
+		id := c.args[c.currArgIndex].ID()
+		// we allow StringType since "NaN", "Infinity", and "-Infinity" are also valid values
+		valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType)
+		if !valid {
+			return "", fixedPointFormatError(id, c.typeOf(id).TypeName())
+		}
+		return "", nil
+	}
+}
+
+func (c *stringFormatChecker) Scientific(precision *int) func(ref.Val, string) (string, error) {
+	return func(arg ref.Val, locale string) (string, error) {
+		id := c.args[c.currArgIndex].ID()
+		valid := c.verifyTypeOneOf(id, types.DoubleType, types.StringType)
+		if !valid {
+			return "", scientificFormatError(id, c.typeOf(id).TypeName())
+		}
+		return "", nil
+	}
+}
+
+func (c *stringFormatChecker) Binary(arg ref.Val, locale string) (string, error) {
+	id := c.args[c.currArgIndex].ID()
+	valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.BoolType)
+	if !valid {
+		return "", binaryFormatError(id, c.typeOf(id).TypeName())
+	}
+	return "", nil
+}
+
+func (c *stringFormatChecker) Hex(useUpper bool) func(ref.Val, string) (string, error) {
+	return func(arg ref.Val, locale string) (string, error) {
+		id := c.args[c.currArgIndex].ID()
+		valid := c.verifyTypeOneOf(id, types.IntType, types.UintType, types.StringType, types.BytesType)
+		if !valid {
+			return "", hexFormatError(id, c.typeOf(id).TypeName())
+		}
+		return "", nil
+	}
+}
+
+func (c *stringFormatChecker) Octal(arg ref.Val, locale string) (string, error) {
+	id := c.args[c.currArgIndex].ID()
+	valid := c.verifyTypeOneOf(id, types.IntType, types.UintType)
+	if !valid {
+		return "", octalFormatError(id, c.typeOf(id).TypeName())
+	}
+	return "", nil
+}
+
+func (c *stringFormatChecker) Arg(index int64) (ref.Val, error) {
+	c.argsRequested++
+	c.currArgIndex = index
+	// return a dummy value - this is immediately passed back to us
+	// through one of the FormatCallback functions, so anything will do
+	return types.Int(0), nil
+}
+
+func (c *stringFormatChecker) Size() int64 {
+	return int64(len(c.args))
+}
+
+func (c *stringFormatChecker) typeOf(id int64) *cel.Type {
+	return c.ast.GetType(id)
+}
+
+func (c *stringFormatChecker) verifyTypeOneOf(id int64, validTypes ...*cel.Type) bool {
+	t := c.typeOf(id)
+	if t == cel.DynType {
+		return true
+	}
+	for _, vt := range validTypes {
+		// Only check runtime type compatibility without delving deeper into parameterized types
+		if t.Kind() == vt.Kind() {
+			return true
+		}
+	}
+	return false
+}
+
+func (c *stringFormatChecker) verifyString(sub ast.Expr) (bool, int64) {
+	paramA := cel.TypeParamType("A")
+	paramB := cel.TypeParamType("B")
+	subVerified := c.verifyTypeOneOf(sub.ID(),
+		cel.ListType(paramA), cel.MapType(paramA, paramB),
+		cel.IntType, cel.UintType, cel.DoubleType, cel.BoolType, cel.StringType,
+		cel.TimestampType, cel.BytesType, cel.DurationType, cel.TypeType, cel.NullType)
+	if !subVerified {
+		return false, sub.ID()
+	}
+	switch sub.Kind() {
+	case ast.ListKind:
+		for _, e := range sub.AsList().Elements() {
+			// recursively verify if we're dealing with a list/map
+			verified, id := c.verifyString(e)
+			if !verified {
+				return false, id
+			}
+		}
+		return true, sub.ID()
+	case ast.MapKind:
+		for _, e := range sub.AsMap().Entries() {
+			// recursively verify if we're dealing with a list/map
+			entry := e.AsMapEntry()
+			verified, id := c.verifyString(entry.Key())
+			if !verified {
+				return false, id
+			}
+			verified, id = c.verifyString(entry.Value())
+			if !verified {
+				return false, id
+			}
+		}
+		return true, sub.ID()
+	default:
+		return true, sub.ID()
+	}
+}
+
+// helper routines for reporting common errors during string formatting static validation and
+// runtime execution.
+ +func binaryFormatError(id int64, badType string) error { + return newFormatError(id, "only integers and bools can be formatted as binary, was given %s", badType) +} + +func decimalFormatError(id int64, badType string) error { + return newFormatError(id, "decimal clause can only be used on integers, was given %s", badType) +} + +func fixedPointFormatError(id int64, badType string) error { + return newFormatError(id, "fixed-point clause can only be used on doubles, was given %s", badType) +} + +func hexFormatError(id int64, badType string) error { + return newFormatError(id, "only integers, byte buffers, and strings can be formatted as hex, was given %s", badType) +} + +func octalFormatError(id int64, badType string) error { + return newFormatError(id, "octal clause can only be used on integers, was given %s", badType) +} + +func scientificFormatError(id int64, badType string) error { + return newFormatError(id, "scientific clause can only be used on doubles, was given %s", badType) +} + +func stringFormatError(id int64, badType string) error { + return newFormatError(id, "string clause can only be used on strings, bools, bytes, ints, doubles, maps, lists, types, durations, and timestamps, was given %s", badType) +} + +type formatError struct { + id int64 + msg string +} + +func newFormatError(id int64, msg string, args ...any) error { + return formatError{ + id: id, + msg: fmt.Sprintf(msg, args...), + } +} + +func (e formatError) Error() string { + return e.msg +} + +func (e formatError) Is(target error) bool { + return e.msg == target.Error() +} + +// stringArgList implements the formatListArgs interface. +type stringArgList struct { + args traits.Lister +} + +func (c *stringArgList) Arg(index int64) (ref.Val, error) { + if index >= c.args.Size().Value().(int64) { + return nil, fmt.Errorf("index %d out of range", index) + } + return c.args.Get(types.Int(index)), nil +} + +func (c *stringArgList) Size() int64 { + return c.args.Size().Value().(int64) +} + +// formatStringInterpolator is an interface that allows user-defined behavior +// for formatting clause implementations, as well as argument retrieval. +// Each function is expected to support the appropriate types as laid out in +// the string.format documentation, and to return an error if given an inappropriate type. +type formatStringInterpolator interface { + // String takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a string, or an error if one occurred. + String(ref.Val, string) (string, error) + + // Decimal takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a decimal integer, or an error if one occurred. + Decimal(ref.Val, string) (string, error) + + // Fixed takes an int pointer representing precision (or nil if none was given) and + // returns a function operating in a similar manner to String and Decimal, taking a + // ref.Val and locale and returning the appropriate string. A closure is returned + // so precision can be set without needing an additional function call/configuration. + Fixed(*int) func(ref.Val, string) (string, error) + + // Scientific functions identically to Fixed, except the string returned from the closure + // is expected to be in scientific notation. + Scientific(*int) func(ref.Val, string) (string, error) + + // Binary takes a ref.Val and a string representing the current locale identifier + // and returns the Val formatted as a binary integer, or an error if one occurred. 
+ Binary(ref.Val, string) (string, error) + + // Hex takes a boolean that, if true, indicates the hex string output by the returned + // closure should use uppercase letters for A-F. + Hex(bool) func(ref.Val, string) (string, error) + + // Octal takes a ref.Val and a string representing the current locale identifier and + // returns the Val formatted in octal, or an error if one occurred. + Octal(ref.Val, string) (string, error) +} + +// formatListArgs is an interface that allows user-defined list-like datatypes to be used +// for formatting clause implementations. +type formatListArgs interface { + // Arg returns the ref.Val at the given index, or an error if one occurred. + Arg(int64) (ref.Val, error) + + // Size returns the length of the argument list. + Size() int64 +} + +// parseFormatString formats a string according to the string.format syntax, taking the clause implementations +// from the provided FormatCallback and the args from the given FormatList. +func parseFormatString(formatStr string, callback formatStringInterpolator, list formatListArgs, locale string) (string, error) { + i := 0 + argIndex := 0 + var builtStr strings.Builder + for i < len(formatStr) { + if formatStr[i] == '%' { + if i+1 < len(formatStr) && formatStr[i+1] == '%' { + err := builtStr.WriteByte('%') + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += 2 + continue + } else { + argAny, err := list.Arg(int64(argIndex)) + if err != nil { + return "", err + } + if i+1 >= len(formatStr) { + return "", errors.New("unexpected end of string") + } + if int64(argIndex) >= list.Size() { + return "", fmt.Errorf("index %d out of range", argIndex) + } + numRead, val, refErr := parseAndFormatClause(formatStr[i:], argAny, callback, list, locale) + if refErr != nil { + return "", refErr + } + _, err = builtStr.WriteString(val) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i += numRead + argIndex++ + } + } else { + err := builtStr.WriteByte(formatStr[i]) + if err != nil { + return "", fmt.Errorf("error writing format string: %w", err) + } + i++ + } + } + return builtStr.String(), nil +} + +// parseAndFormatClause parses the format clause at the start of the given string with val, and returns +// how many characters were consumed and the substituted string form of val, or an error if one occurred. 
+func parseAndFormatClause(formatStr string, val ref.Val, callback formatStringInterpolator, list formatListArgs, locale string) (int, string, error) { + i := 1 + read, formatter, err := parseFormattingClause(formatStr[i:], callback) + i += read + if err != nil { + return -1, "", newParseFormatError("could not parse formatting clause", err) + } + + valStr, err := formatter(val, locale) + if err != nil { + return -1, "", newParseFormatError("error during formatting", err) + } + return i, valStr, nil +} + +func parseFormattingClause(formatStr string, callback formatStringInterpolator) (int, clauseImpl, error) { + i := 0 + read, precision, err := parsePrecision(formatStr[i:]) + i += read + if err != nil { + return -1, nil, fmt.Errorf("error while parsing precision: %w", err) + } + r := rune(formatStr[i]) + i++ + switch r { + case 's': + return i, callback.String, nil + case 'd': + return i, callback.Decimal, nil + case 'f': + return i, callback.Fixed(precision), nil + case 'e': + return i, callback.Scientific(precision), nil + case 'b': + return i, callback.Binary, nil + case 'x', 'X': + return i, callback.Hex(unicode.IsUpper(r)), nil + case 'o': + return i, callback.Octal, nil + default: + return -1, nil, fmt.Errorf("unrecognized formatting clause \"%c\"", r) + } +} + +func parsePrecision(formatStr string) (int, *int, error) { + i := 0 + if formatStr[i] != '.' { + return i, nil, nil + } + i++ + var buffer strings.Builder + for { + if i >= len(formatStr) { + return -1, nil, errors.New("could not find end of precision specifier") + } + if !isASCIIDigit(rune(formatStr[i])) { + break + } + buffer.WriteByte(formatStr[i]) + i++ + } + precision, err := strconv.Atoi(buffer.String()) + if err != nil { + return -1, nil, fmt.Errorf("error while converting precision to integer: %w", err) + } + return i, &precision, nil +} + +func isASCIIDigit(r rune) bool { + return r <= unicode.MaxASCII && unicode.IsDigit(r) +} + +type parseFormatError struct { + msg string + wrapped error +} + +func newParseFormatError(msg string, wrapped error) error { + return parseFormatError{msg: msg, wrapped: wrapped} +} + +func (e parseFormatError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.wrapped.Error()) +} + +func (e parseFormatError) Is(target error) bool { + return e.Error() == target.Error() +} + +func (e parseFormatError) Unwrap() error { + return e.wrapped +} + +const ( + runtimeID = int64(-1) +) diff --git a/vendor/github.com/google/cel-go/ext/guards.go b/vendor/github.com/google/cel-go/ext/guards.go new file mode 100644 index 00000000..2c00bfe3 --- /dev/null +++ b/vendor/github.com/google/cel-go/ext/guards.go @@ -0,0 +1,63 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// function invocation guards for common call signatures within extension functions. 
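A hedged sketch (not part of the vendored file) of the pattern these guards support: adapting a Go (value, error) return into a single ref.Val so a plain helper can back a CEL function binding; the reverse helper here is hypothetical:

	// Sketch: stringOrError collapses a (string, error) pair into one ref.Val.
	func reverse(s string) (string, error) {
		r := []rune(s)
		for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
			r[i], r[j] = r[j], r[i]
		}
		return string(r), nil
	}

	var reverseOpt = cel.Function("reverse",
		cel.MemberOverload("string_reverse", []*cel.Type{cel.StringType}, cel.StringType,
			cel.UnaryBinding(func(arg ref.Val) ref.Val {
				return stringOrError(reverse(arg.Value().(string)))
			})))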
+
+func intOrError(i int64, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.Int(i)
+}
+
+func bytesOrError(bytes []byte, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.Bytes(bytes)
+}
+
+func stringOrError(str string, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.String(str)
+}
+
+func listStringOrError(strs []string, err error) ref.Val {
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return types.DefaultTypeAdapter.NativeToValue(strs)
+}
+
+func macroTargetMatchesNamespace(ns string, target ast.Expr) bool {
+	switch target.Kind() {
+	case ast.IdentKind:
+		if target.AsIdent() != ns {
+			return false
+		}
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/google/cel-go/ext/lists.go b/vendor/github.com/google/cel-go/ext/lists.go
new file mode 100644
index 00000000..08751d08
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/lists.go
@@ -0,0 +1,94 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// Lists returns a cel.EnvOption to configure extended functions for list manipulation.
+// As a general note, all indices are zero-based.
+//
+// # Slice
+//
+// Returns a new sub-list using the indexes provided.
+//
+//	<list>.slice(<int>, <int>) -> <list>
+//
+// Examples:
+//
+//	[1,2,3,4].slice(1, 3) // return [2, 3]
+//	[1,2,3,4].slice(2, 4) // return [3, 4]
+func Lists() cel.EnvOption {
+	return cel.Lib(listsLib{})
+}
+
+type listsLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (listsLib) LibraryName() string {
+	return "cel.lib.ext.lists"
+}
+
+// CompileOptions implements the Library interface method.
+func (listsLib) CompileOptions() []cel.EnvOption {
+	listType := cel.ListType(cel.TypeParamType("T"))
+	return []cel.EnvOption{
+		cel.Function("slice",
+			cel.MemberOverload("list_slice",
+				[]*cel.Type{listType, cel.IntType, cel.IntType}, listType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					list := args[0].(traits.Lister)
+					start := args[1].(types.Int)
+					end := args[2].(types.Int)
+					result, err := slice(list, start, end)
+					if err != nil {
+						return types.WrapErr(err)
+					}
+					return result
+				}),
+			),
+		),
+	}
+}
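An illustrative usage sketch for the slice overload registered above (not part of the vendored file; assumes a default cel-go environment):

	// Sketch: compile and evaluate a CEL expression using lists.slice.
	env, err := cel.NewEnv(ext.Lists())
	if err != nil {
		log.Fatal(err)
	}
	ast, iss := env.Compile(`[1, 2, 3, 4].slice(1, 3) == [2, 3]`)
	if iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	prg, _ := env.Program(ast)
	out, _, _ := prg.Eval(cel.NoVars())
	// out is expected to be types.True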
+// ProgramOptions implements the Library interface method.
+func (listsLib) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func slice(list traits.Lister, start, end types.Int) (ref.Val, error) {
+	listLength := list.Size().(types.Int)
+	if start < 0 || end < 0 {
+		return nil, fmt.Errorf("cannot slice(%d, %d), negative indexes not supported", start, end)
+	}
+	if start > end {
+		return nil, fmt.Errorf("cannot slice(%d, %d), start index must be less than or equal to end index", start, end)
+	}
+	if listLength < end {
+		return nil, fmt.Errorf("cannot slice(%d, %d), list is length %d", start, end, listLength)
+	}
+
+	var newList []ref.Val
+	for i := types.Int(start); i < end; i++ {
+		val := list.Get(i)
+		newList = append(newList, val)
+	}
+	return types.DefaultTypeAdapter.NativeToValue(newList), nil
+}
diff --git a/vendor/github.com/google/cel-go/ext/math.go b/vendor/github.com/google/cel-go/ext/math.go
new file mode 100644
index 00000000..893654e7
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/math.go
@@ -0,0 +1,895 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"fmt"
+	"math"
+	"strings"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// Math returns a cel.EnvOption to configure namespaced math helper macros and
+// functions.
+//
+// Note, all macros use the 'math' namespace; however, at the time of macro
+// expansion the namespace looks just like any other identifier. If you are
+// currently using a variable named 'math', the macro will likely work just as
+// intended; however, there is some chance for collision.
+//
+// # Math.Greatest
+//
+// Returns the greatest valued number present in the arguments to the macro.
+//
+// Greatest is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+//	math.greatest(<arg>, ...) -> <double|int|uint>
+//
+// Examples:
+//
+//	math.greatest(1) // 1
+//	math.greatest(1u, 2u) // 2u
+//	math.greatest(-42.0, -21.5, -100.0) // -21.5
+//	math.greatest([-42.0, -21.5, -100.0]) // -21.5
+//	math.greatest(numbers) // numbers must be list(numeric)
+//
+//	math.greatest() // parse error
+//	math.greatest('string') // parse error
+//	math.greatest(a, b) // check-time error if a or b is non-numeric
+//	math.greatest(dyn('string')) // runtime error
+//
+// # Math.Least
+//
+// Returns the least valued number present in the arguments to the macro.
+//
+// Least is a variable argument count macro which must take at least one
+// argument. Simple numeric and list literals are supported as valid argument
+// types; however, other literals will be flagged as errors during macro
+// expansion. If the argument expression does not resolve to a numeric or
+// list(numeric) type during type-checking, or during runtime then an error
+// will be produced. If a list argument is empty, this too will produce an
+// error.
+//
+//	math.least(<arg>, ...) -> <double|int|uint>
+//
+// Examples:
+//
+//	math.least(1) // 1
+//	math.least(1u, 2u) // 1u
+//	math.least(-42.0, -21.5, -100.0) // -100.0
+//	math.least([-42.0, -21.5, -100.0]) // -100.0
+//	math.least(numbers) // numbers must be list(numeric)
+//
+//	math.least() // parse error
+//	math.least('string') // parse error
+//	math.least(a, b) // check-time error if a or b is non-numeric
+//	math.least(dyn('string')) // runtime error
+//
+// # Math.BitOr
+//
+// Introduced at version: 1
+//
+// Performs a bitwise-OR operation over two int or uint values.
+//
+//	math.bitOr(<int>, <int>) -> <int>
+//	math.bitOr(<uint>, <uint>) -> <uint>
+//
+// Examples:
+//
+//	math.bitOr(1u, 2u) // returns 3u
+//	math.bitOr(-2, -4) // returns -2
+//
+// # Math.BitAnd
+//
+// Introduced at version: 1
+//
+// Performs a bitwise-AND operation over two int or uint values.
+//
+//	math.bitAnd(<int>, <int>) -> <int>
+//	math.bitAnd(<uint>, <uint>) -> <uint>
+//
+// Examples:
+//
+//	math.bitAnd(3u, 2u) // return 2u
+//	math.bitAnd(3, 5) // returns 3
+//	math.bitAnd(-3, -5) // returns -7
+//
+// # Math.BitXor
+//
+// Introduced at version: 1
+//
+//	math.bitXor(<int>, <int>) -> <int>
+//	math.bitXor(<uint>, <uint>) -> <uint>
+//
+// Performs a bitwise-XOR operation over two int or uint values.
+//
+// Examples:
+//
+//	math.bitXor(3u, 5u) // returns 6u
+//	math.bitXor(1, 3) // returns 2
+//
+// # Math.BitNot
+//
+// Introduced at version: 1
+//
+// Function which accepts a single int or uint and performs a bitwise-NOT
+// ones-complement of the given binary value.
+//
+//	math.bitNot(<int>) -> <int>
+//	math.bitNot(<uint>) -> <uint>
+//
+// Examples
+//
+//	math.bitNot(1) // returns -1
+//	math.bitNot(-1) // return 0
+//	math.bitNot(0u) // returns 18446744073709551615u
+//
+// # Math.BitShiftLeft
+//
+// Introduced at version: 1
+//
+// Perform a left shift of bits on the first parameter, by the amount of bits
+// specified in the second parameter. The first parameter is either a uint or
+// an int. The second parameter must be an int.
+//
+// When the second parameter is 64 or greater, 0 will always be returned
+// since the number of bits shifted is greater than or equal to the total bit
+// length of the number being shifted. Negative valued bit shifts will result
+// in a runtime error.
+//
+//	math.bitShiftLeft(<int>, <int>) -> <int>
+//	math.bitShiftLeft(<uint>, <int>) -> <uint>
+//
+// Examples
+//
+//	math.bitShiftLeft(1, 2) // returns 4
+//	math.bitShiftLeft(-1, 2) // returns -4
+//	math.bitShiftLeft(1u, 2) // return 4u
+//	math.bitShiftLeft(1u, 200) // returns 0u
+//
+// # Math.BitShiftRight
+//
+// Introduced at version: 1
+//
+// Perform a right shift of bits on the first parameter, by the amount of bits
+// specified in the second parameter. The first parameter is either a uint or
+// an int. The second parameter must be an int.
+//
+// When the second parameter is 64 or greater, 0 will always be returned since
+// the number of bits shifted is greater than or equal to the total bit length
+// of the number being shifted. Negative valued bit shifts will result in a
+// runtime error.
+//
+// The sign bit extension will not be preserved for this operation: vacant bits
+// on the left are filled with 0.
+//
+//	math.bitShiftRight(<int>, <int>) -> <int>
+//	math.bitShiftRight(<uint>, <int>) -> <uint>
+//
+// Examples
+//
+//	math.bitShiftRight(1024, 2) // returns 256
+//	math.bitShiftRight(1024u, 2) // returns 256u
+//	math.bitShiftRight(1024u, 64) // returns 0u
+//
+// # Math.Ceil
+//
+// Introduced at version: 1
+//
+// Compute the ceiling of a double value.
+//
+//	math.ceil(<double>) -> <double>
+//
+// Examples:
+//
+//	math.ceil(1.2) // returns 2.0
+//	math.ceil(-1.2) // returns -1.0
+//
+// # Math.Floor
+//
+// Introduced at version: 1
+//
+// Compute the floor of a double value.
+//
+//	math.floor(<double>) -> <double>
+//
+// Examples:
+//
+//	math.floor(1.2) // returns 1.0
+//	math.floor(-1.2) // returns -2.0
+//
+// # Math.Round
+//
+// Introduced at version: 1
+//
+// Rounds the double value to the nearest whole number with ties rounding away
+// from zero, e.g. 1.5 -> 2.0, -1.5 -> -2.0.
+//
+//	math.round(<double>) -> <double>
+//
+// Examples:
+//
+//	math.round(1.2) // returns 1.0
+//	math.round(1.5) // returns 2.0
+//	math.round(-1.5) // returns -2.0
+//
+// # Math.Trunc
+//
+// Introduced at version: 1
+//
+// Truncates the fractional portion of the double value.
+//
+//	math.trunc(<double>) -> <double>
+//
+// Examples:
+//
+//	math.trunc(-1.3) // returns -1.0
+//	math.trunc(1.3) // returns 1.0
+//
+// # Math.Abs
+//
+// Introduced at version: 1
+//
+// Returns the absolute value of the numeric type provided as input. If the
+// value is NaN, the output is NaN. If the input is int64 min, the function
+// will result in an overflow error.
+//
+//	math.abs(<double>) -> <double>
+//	math.abs(<int>) -> <int>
+//	math.abs(<uint>) -> <uint>
+//
+// Examples:
+//
+//	math.abs(-1) // returns 1
+//	math.abs(1) // returns 1
+//	math.abs(-9223372036854775808) // overflow error
+//
+// # Math.Sign
+//
+// Introduced at version: 1
+//
+// Returns the sign of the numeric type, either -1, 0, 1 as an int, double, or
+// uint depending on the overload. For floating point values, if NaN is
+// provided as input, the output is also NaN. The implementation does not
+// differentiate between positive and negative zero.
+//
+//	math.sign(<double>) -> <double>
+//	math.sign(<int>) -> <int>
+//	math.sign(<uint>) -> <uint>
+//
+// Examples:
+//
+//	math.sign(-42) // returns -1
+//	math.sign(0) // returns 0
+//	math.sign(42) // returns 1
+//
+// # Math.IsInf
+//
+// Introduced at version: 1
+//
+// Returns true if the input double value is -Inf or +Inf.
+//
+//	math.isInf(<double>) -> <bool>
+//
+// Examples:
+//
+//	math.isInf(1.0/0.0) // returns true
+//	math.isInf(1.2) // returns false
+//
+// # Math.IsNaN
+//
+// Introduced at version: 1
+//
+// Returns true if the input double value is NaN, false otherwise.
+//
+//	math.isNaN(<double>) -> <bool>
+//
+// Examples:
+//
+//	math.isNaN(0.0/0.0) // returns true
+//	math.isNaN(1.2) // returns false
+//
+// # Math.IsFinite
+//
+// Introduced at version: 1
+//
+// Returns true if the value is a finite number. Equivalent in behavior to:
+// !math.isNaN(double) && !math.isInf(double)
+//
+//	math.isFinite(<double>) -> <bool>
+//
+// Examples:
+//
+//	math.isFinite(0.0/0.0) // returns false
+//	math.isFinite(1.2) // returns true
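Before the implementation, a hedged usage sketch for the macros and version-gated helpers documented above (not part of the vendored file):

	// Sketch: math.greatest expands to math.@max, and isFinite is available
	// because ext.Math() enables the highest library version by default.
	env, err := cel.NewEnv(ext.Math())
	if err != nil {
		log.Fatal(err)
	}
	ast, iss := env.Compile(`math.greatest(1, 2, 3) == 3 && math.isFinite(1.2)`)
	if iss.Err() != nil {
		log.Fatal(iss.Err())
	}
	prg, _ := env.Program(ast)
	out, _, _ := prg.Eval(cel.NoVars())
	// out is expected to be types.True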
+func Math() cel.EnvOption {
+	return cel.Lib(&mathLib{version: math.MaxUint32})
+}
+
+const (
+	mathNamespace = "math"
+	leastMacro    = "least"
+	greatestMacro = "greatest"
+
+	// Min-max functions
+	minFunc = "math.@min"
+	maxFunc = "math.@max"
+
+	// Rounding functions
+	ceilFunc  = "math.ceil"
+	floorFunc = "math.floor"
+	roundFunc = "math.round"
+	truncFunc = "math.trunc"
+
+	// Floating point helper functions
+	isInfFunc    = "math.isInf"
+	isNanFunc    = "math.isNaN"
+	isFiniteFunc = "math.isFinite"
+
+	// Signedness functions
+	absFunc  = "math.abs"
+	signFunc = "math.sign"
+
+	// Bitwise functions
+	bitAndFunc        = "math.bitAnd"
+	bitOrFunc         = "math.bitOr"
+	bitXorFunc        = "math.bitXor"
+	bitNotFunc        = "math.bitNot"
+	bitShiftLeftFunc  = "math.bitShiftLeft"
+	bitShiftRightFunc = "math.bitShiftRight"
+)
+
+var (
+	errIntOverflow = types.NewErr("integer overflow")
+)
+
+type MathOption func(*mathLib) *mathLib
+
+func MathVersion(version uint32) MathOption {
+	return func(lib *mathLib) *mathLib {
+		lib.version = version
+		return lib
+	}
+}
+
+type mathLib struct {
+	version uint32
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (*mathLib) LibraryName() string {
+	return "cel.lib.ext.math"
+}
+
+// CompileOptions implements the Library interface method.
+func (lib *mathLib) CompileOptions() []cel.EnvOption {
+	opts := []cel.EnvOption{
+		cel.Macros(
+			// math.least(num, ...)
+			cel.ReceiverVarArgMacro(leastMacro, mathLeast),
+			// math.greatest(num, ...)
+			cel.ReceiverVarArgMacro(greatestMacro, mathGreatest),
+		),
+		cel.Function(minFunc,
+			cel.Overload("math_@min_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+				cel.UnaryBinding(identity)),
+			cel.Overload("math_@min_int", []*cel.Type{cel.IntType}, cel.IntType,
+				cel.UnaryBinding(identity)),
+			cel.Overload("math_@min_uint", []*cel.Type{cel.UintType}, cel.UintType,
+				cel.UnaryBinding(identity)),
+			cel.Overload("math_@min_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType,
+				cel.BinaryBinding(minPair)),
+			cel.Overload("math_@min_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType,
+				cel.UnaryBinding(minList)),
+			cel.Overload("math_@min_list_int", []*cel.Type{cel.ListType(cel.IntType)}, cel.IntType,
+				cel.UnaryBinding(minList)),
+			cel.Overload("math_@min_list_uint", []*cel.Type{cel.ListType(cel.UintType)},
cel.UintType, + cel.UnaryBinding(minList)), + ), + cel.Function(maxFunc, + cel.Overload("math_@max_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(identity)), + cel.Overload("math_@max_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(identity)), + cel.Overload("math_@max_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + cel.Overload("math_@max_double_double", []*cel.Type{cel.DoubleType, cel.DoubleType}, cel.DoubleType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_int_uint", []*cel.Type{cel.IntType, cel.UintType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_int_double", []*cel.Type{cel.IntType, cel.DoubleType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_double_int", []*cel.Type{cel.DoubleType, cel.IntType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_double_uint", []*cel.Type{cel.DoubleType, cel.UintType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_uint_double", []*cel.Type{cel.UintType, cel.DoubleType}, cel.DynType, + cel.BinaryBinding(maxPair)), + cel.Overload("math_@max_list_double", []*cel.Type{cel.ListType(cel.DoubleType)}, cel.DoubleType, + cel.UnaryBinding(maxList)), + cel.Overload("math_@max_list_int", []*cel.Type{cel.ListType(cel.IntType)}, cel.IntType, + cel.UnaryBinding(maxList)), + cel.Overload("math_@max_list_uint", []*cel.Type{cel.ListType(cel.UintType)}, cel.UintType, + cel.UnaryBinding(maxList)), + ), + } + if lib.version >= 1 { + opts = append(opts, + // Rounding function declarations + cel.Function(ceilFunc, + cel.Overload("math_ceil_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(ceil))), + cel.Function(floorFunc, + cel.Overload("math_floor_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(floor))), + cel.Function(roundFunc, + cel.Overload("math_round_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(round))), + cel.Function(truncFunc, + cel.Overload("math_trunc_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(trunc))), + + // Floating point helpers + cel.Function(isInfFunc, + cel.Overload("math_isInf_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isInf))), + cel.Function(isNanFunc, + cel.Overload("math_isNaN_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isNaN))), + cel.Function(isFiniteFunc, + cel.Overload("math_isFinite_double", []*cel.Type{cel.DoubleType}, cel.BoolType, + cel.UnaryBinding(isFinite))), + + // Signedness functions + cel.Function(absFunc, + cel.Overload("math_abs_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(absDouble)), + cel.Overload("math_abs_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(absInt)), + cel.Overload("math_abs_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(identity)), + ), + cel.Function(signFunc, + cel.Overload("math_sign_double", []*cel.Type{cel.DoubleType}, cel.DoubleType, + cel.UnaryBinding(sign)), + cel.Overload("math_sign_int", []*cel.Type{cel.IntType}, cel.IntType, + 
cel.UnaryBinding(sign)), + cel.Overload("math_sign_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(sign)), + ), + + // Bitwise operator declarations + cel.Function(bitAndFunc, + cel.Overload("math_bitAnd_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitAndPairInt)), + cel.Overload("math_bitAnd_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitAndPairUint)), + ), + cel.Function(bitOrFunc, + cel.Overload("math_bitOr_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitOrPairInt)), + cel.Overload("math_bitOr_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitOrPairUint)), + ), + cel.Function(bitXorFunc, + cel.Overload("math_bitXor_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitXorPairInt)), + cel.Overload("math_bitXor_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType, + cel.BinaryBinding(bitXorPairUint)), + ), + cel.Function(bitNotFunc, + cel.Overload("math_bitNot_int_int", []*cel.Type{cel.IntType}, cel.IntType, + cel.UnaryBinding(bitNotInt)), + cel.Overload("math_bitNot_uint_uint", []*cel.Type{cel.UintType}, cel.UintType, + cel.UnaryBinding(bitNotUint)), + ), + cel.Function(bitShiftLeftFunc, + cel.Overload("math_bitShiftLeft_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftLeftIntInt)), + cel.Overload("math_bitShiftLeft_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftLeftUintInt)), + ), + cel.Function(bitShiftRightFunc, + cel.Overload("math_bitShiftRight_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType, + cel.BinaryBinding(bitShiftRightIntInt)), + cel.Overload("math_bitShiftRight_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType, + cel.BinaryBinding(bitShiftRightUintInt)), + ), + ) + } + return opts +} + +// ProgramOptions implements the Library interface method. 
+func (*mathLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func mathLeast(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(mathNamespace, target) { + return nil, nil + } + switch len(args) { + case 0: + return nil, meh.NewError(target.ID(), "math.least() requires at least one argument") + case 1: + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return meh.NewCall(minFunc, args[0]), nil + } + return nil, meh.NewError(args[0].ID(), "math.least() invalid single argument value") + case 2: + err := checkInvalidArgs(meh, "math.least()", args) + if err != nil { + return nil, err + } + return meh.NewCall(minFunc, args...), nil + default: + err := checkInvalidArgs(meh, "math.least()", args) + if err != nil { + return nil, err + } + return meh.NewCall(minFunc, meh.NewList(args...)), nil + } +} + +func mathGreatest(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(mathNamespace, target) { + return nil, nil + } + switch len(args) { + case 0: + return nil, mef.NewError(target.ID(), "math.greatest() requires at least one argument") + case 1: + if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) { + return mef.NewCall(maxFunc, args[0]), nil + } + return nil, mef.NewError(args[0].ID(), "math.greatest() invalid single argument value") + case 2: + err := checkInvalidArgs(mef, "math.greatest()", args) + if err != nil { + return nil, err + } + return mef.NewCall(maxFunc, args...), nil + default: + err := checkInvalidArgs(mef, "math.greatest()", args) + if err != nil { + return nil, err + } + return mef.NewCall(maxFunc, mef.NewList(args...)), nil + } +} + +func identity(val ref.Val) ref.Val { + return val +} + +func ceil(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Ceil(float64(v))) +} + +func floor(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Floor(float64(v))) +} + +func round(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Round(float64(v))) +} + +func trunc(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Double(math.Trunc(float64(v))) +} + +func isInf(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsInf(float64(v), 0)) +} + +func isFinite(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Bool(!math.IsInf(v, 0) && !math.IsNaN(v)) +} + +func isNaN(val ref.Val) ref.Val { + v := val.(types.Double) + return types.Bool(math.IsNaN(float64(v))) +} + +func absDouble(val ref.Val) ref.Val { + v := float64(val.(types.Double)) + return types.Double(math.Abs(v)) +} + +func absInt(val ref.Val) ref.Val { + v := int64(val.(types.Int)) + if v == math.MinInt64 { + return errIntOverflow + } + if v >= 0 { + return val + } + return -types.Int(v) +} + +func sign(val ref.Val) ref.Val { + switch v := val.(type) { + case types.Double: + if isNaN(v) == types.True { + return v + } + zero := types.Double(0) + if v > zero { + return types.Double(1) + } + if v < zero { + return types.Double(-1) + } + return zero + case types.Int: + return v.Compare(types.IntZero) + case types.Uint: + if v == types.Uint(0) { + return types.Uint(0) + } + return types.Uint(1) + default: + return maybeSuffixError(val, "math.sign") + } +} + +func bitAndPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l & r +} + +func bitAndPairUint(first, 
second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l & r +} + +func bitOrPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l | r +} + +func bitOrPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l | r +} + +func bitXorPairInt(first, second ref.Val) ref.Val { + l := first.(types.Int) + r := second.(types.Int) + return l ^ r +} + +func bitXorPairUint(first, second ref.Val) ref.Val { + l := first.(types.Uint) + r := second.(types.Uint) + return l ^ r +} + +func bitNotInt(value ref.Val) ref.Val { + v := value.(types.Int) + return ^v +} + +func bitNotUint(value ref.Val) ref.Val { + v := value.(types.Uint) + return ^v +} + +func bitShiftLeftIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftLeftUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftLeft() negative offset: %d", bs) + } + return v << bs +} + +func bitShiftRightIntInt(value, bits ref.Val) ref.Val { + v := value.(types.Int) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return types.Int(types.Uint(v) >> bs) +} + +func bitShiftRightUintInt(value, bits ref.Val) ref.Val { + v := value.(types.Uint) + bs := bits.(types.Int) + if bs < types.IntZero { + return types.NewErr("math.bitShiftRight() negative offset: %d", bs) + } + return v >> bs +} + +func minPair(first, second ref.Val) ref.Val { + cmp, ok := first.(traits.Comparer) + if !ok { + return types.MaybeNoSuchOverloadErr(first) + } + out := cmp.Compare(second) + if types.IsUnknownOrError(out) { + return maybeSuffixError(out, "math.@min") + } + if out == types.IntOne { + return second + } + return first +} + +func minList(numList ref.Val) ref.Val { + l := numList.(traits.Lister) + size := l.Size().(types.Int) + if size == types.IntZero { + return types.NewErr("math.@min(list) argument must not be empty") + } + min := l.Get(types.IntZero) + for i := types.IntOne; i < size; i++ { + min = minPair(min, l.Get(i)) + } + switch min.Type() { + case types.IntType, types.DoubleType, types.UintType, types.UnknownType: + return min + default: + return types.NewErr("no such overload: math.@min") + } +} + +func maxPair(first, second ref.Val) ref.Val { + cmp, ok := first.(traits.Comparer) + if !ok { + return types.MaybeNoSuchOverloadErr(first) + } + out := cmp.Compare(second) + if types.IsUnknownOrError(out) { + return maybeSuffixError(out, "math.@max") + } + if out == types.IntNegOne { + return second + } + return first +} + +func maxList(numList ref.Val) ref.Val { + l := numList.(traits.Lister) + size := l.Size().(types.Int) + if size == types.IntZero { + return types.NewErr("math.@max(list) argument must not be empty") + } + max := l.Get(types.IntZero) + for i := types.IntOne; i < size; i++ { + max = maxPair(max, l.Get(i)) + } + switch max.Type() { + case types.IntType, types.DoubleType, types.UintType, types.UnknownType: + return max + default: + return types.NewErr("no such overload: math.@max") + } +} + +func checkInvalidArgs(meh cel.MacroExprFactory, funcName string, args []ast.Expr) *cel.Error { + for _, arg := range args { + err := checkInvalidArgLiteral(funcName, arg) + if err != nil { + return 
meh.NewError(arg.ID(), err.Error()) + } + } + return nil +} + +func checkInvalidArgLiteral(funcName string, arg ast.Expr) error { + if !isNumericArgType(arg) { + return fmt.Errorf("%s simple literal arguments must be numeric", funcName) + } + return nil +} + +func isNumericArgType(arg ast.Expr) bool { + switch arg.Kind() { + case ast.LiteralKind: + c := ref.Val(arg.AsLiteral()) + switch c.(type) { + case types.Double, types.Int, types.Uint: + return true + default: + return false + } + case ast.ListKind, ast.MapKind, ast.StructKind: + return false + default: + return true + } +} + +func isListLiteralWithNumericArgs(arg ast.Expr) bool { + switch arg.Kind() { + case ast.ListKind: + list := arg.AsList() + if list.Size() == 0 { + return false + } + for _, e := range list.Elements() { + if !isNumericArgType(e) { + return false + } + } + return true + } + return false +} + +func maybeSuffixError(val ref.Val, suffix string) ref.Val { + if types.IsError(val) { + msg := val.(*types.Err).String() + if !strings.Contains(msg, suffix) { + return types.NewErr("%s: %s", msg, suffix) + } + } + return val +} diff --git a/vendor/github.com/google/cel-go/ext/native.go b/vendor/github.com/google/cel-go/ext/native.go new file mode 100644 index 00000000..35b0f1e3 --- /dev/null +++ b/vendor/github.com/google/cel-go/ext/native.go @@ -0,0 +1,752 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/pb" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + + structpb "google.golang.org/protobuf/types/known/structpb" +) + +var ( + nativeObjTraitMask = traits.FieldTesterType | traits.IndexerType + jsonValueType = reflect.TypeOf(&structpb.Value{}) + jsonStructType = reflect.TypeOf(&structpb.Struct{}) +) + +// NativeTypes creates a type provider which uses reflect.Type and reflect.Value instances +// to produce type definitions that can be used within CEL. +// +// All struct types in Go are exposed to CEL via their simple package name and struct type name: +// +// ```go +// package identity +// +// type Account struct { +// ID int +// } +// +// ``` +// +// The type `identity.Account` would be exported to CEL using the same qualified name, e.g. +// `identity.Account{ID: 1234}` would create a new `Account` instance with the `ID` field +// populated. 
+// +// Only exported fields are exposed via NativeTypes, and the type-mapping between Go and CEL +// is as follows: +// +// | Go type | CEL type | +// |-------------------------------------|-----------| +// | bool | bool | +// | []byte | bytes | +// | float32, float64 | double | +// | int, int8, int16, int32, int64 | int | +// | string | string | +// | uint, uint8, uint16, uint32, uint64 | uint | +// | time.Duration | duration | +// | time.Time | timestamp | +// | array, slice | list | +// | map | map | +// +// Please note, if you intend to configure support for proto messages in addition to native +// types, you will need to provide the protobuf types before the golang native types. The +// same advice holds if you are using custom type adapters and type providers. The native type +// provider composes over whichever type adapter and provider is configured in the cel.Env at +// the time that it is invoked. +// +// There is also the possibility to rename the fields of native structs by setting the `cel` tag +// for fields you want to override. In order to enable this feature, pass in the `EnableStructTag` +// option. Here is an example to see it in action: +// +// ```go +// package identity +// +// type Account struct { +// ID int +// OwnerName string `cel:"owner"` +// } +// +// ``` +// +// The `OwnerName` field is now accessible in CEL via `owner`, e.g. `identity.Account{owner: 'bob'}`. +// In case there are duplicated field names in the struct, an error will be returned. +func NativeTypes(args ...any) cel.EnvOption { + return func(env *cel.Env) (*cel.Env, error) { + nativeTypes := make([]any, 0, len(args)) + tpOptions := nativeTypeOptions{} + + for _, v := range args { + switch v := v.(type) { + case NativeTypesOption: + err := v(&tpOptions) + if err != nil { + return nil, err + } + default: + nativeTypes = append(nativeTypes, v) + } + } + + tp, err := newNativeTypeProvider(tpOptions, env.CELTypeAdapter(), env.CELTypeProvider(), nativeTypes...) + if err != nil { + return nil, err + } + + env, err = cel.CustomTypeAdapter(tp)(env) + if err != nil { + return nil, err + } + return cel.CustomTypeProvider(tp)(env) + } +} + +// NativeTypesOption is a functional interface for configuring handling of native types. +type NativeTypesOption func(*nativeTypeOptions) error + +type nativeTypeOptions struct { + // parseStructTags controls if CEL should support struct field renames, by parsing + // struct field tags. + parseStructTags bool +} + +// ParseStructTags configures if native types field names should be overridable by CEL struct tags. 
+func ParseStructTags(enabled bool) NativeTypesOption {
+	return func(ntp *nativeTypeOptions) error {
+		ntp.parseStructTags = enabled
+		return nil
+	}
+}
+
+func newNativeTypeProvider(tpOptions nativeTypeOptions, adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) {
+	nativeTypes := make(map[string]*nativeType, len(refTypes))
+	for _, refType := range refTypes {
+		switch rt := refType.(type) {
+		case reflect.Type:
+			result, err := newNativeTypes(tpOptions.parseStructTags, rt)
+			if err != nil {
+				return nil, err
+			}
+			for idx := range result {
+				nativeTypes[result[idx].TypeName()] = result[idx]
+			}
+		case reflect.Value:
+			result, err := newNativeTypes(tpOptions.parseStructTags, rt.Type())
+			if err != nil {
+				return nil, err
+			}
+			for idx := range result {
+				nativeTypes[result[idx].TypeName()] = result[idx]
+			}
+		default:
+			return nil, fmt.Errorf("unsupported native type: %v (%T) must be reflect.Type or reflect.Value", rt, rt)
+		}
+	}
+	return &nativeTypeProvider{
+		nativeTypes:  nativeTypes,
+		baseAdapter:  adapter,
+		baseProvider: provider,
+		options:      tpOptions,
+	}, nil
+}
+
+type nativeTypeProvider struct {
+	nativeTypes  map[string]*nativeType
+	baseAdapter  types.Adapter
+	baseProvider types.Provider
+	options      nativeTypeOptions
+}
+
+// EnumValue proxies to the types.Provider configured at the time the NativeTypes
+// option was configured.
+func (tp *nativeTypeProvider) EnumValue(enumName string) ref.Val {
+	return tp.baseProvider.EnumValue(enumName)
+}
+
+// FindIdent looks up native type instances by qualified identifier, and if not found
+// proxies to the composed types.Provider.
+func (tp *nativeTypeProvider) FindIdent(typeName string) (ref.Val, bool) {
+	if t, found := tp.nativeTypes[typeName]; found {
+		return t, true
+	}
+	return tp.baseProvider.FindIdent(typeName)
+}
+
+// FindStructType looks up the CEL type definition by qualified identifier, and if not found
+// proxies to the composed types.Provider.
+func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+	if _, found := tp.nativeTypes[typeName]; found {
+		return types.NewTypeTypeWithParam(types.NewObjectType(typeName)), true
+	}
+	if celType, found := tp.baseProvider.FindStructType(typeName); found {
+		return celType, true
+	}
+	return tp.baseProvider.FindStructType(typeName)
+}
+
+func toFieldName(parseStructTag bool, f reflect.StructField) string {
+	if !parseStructTag {
+		return f.Name
+	}
+
+	if name, found := f.Tag.Lookup("cel"); found {
+		return name
+	}
+
+	return f.Name
+}
+
+// FindStructFieldNames looks up the type definition first from the native types, then from
+// the backing provider type set. If found, a set of field names corresponding to the type
+// will be returned.
+func (tp *nativeTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+	if t, found := tp.nativeTypes[typeName]; found {
+		fieldCount := t.refType.NumField()
+		fields := make([]string, fieldCount)
+		for i := 0; i < fieldCount; i++ {
+			fields[i] = toFieldName(tp.options.parseStructTags, t.refType.Field(i))
+		}
+		return fields, true
+	}
+	if celTypeFields, found := tp.baseProvider.FindStructFieldNames(typeName); found {
+		return celTypeFields, true
+	}
+	return tp.baseProvider.FindStructFieldNames(typeName)
+}
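A hedged sketch of the struct-tag renaming described above (not part of the vendored file; the Account type is hypothetical):

	// Sketch: with ParseStructTags(true), the CEL-visible field name follows
	// the `cel` tag instead of the Go field name.
	type Account struct {
		ID        int
		OwnerName string `cel:"owner"`
	}

	func newAccountEnv() (*cel.Env, error) {
		return cel.NewEnv(
			ext.NativeTypes(reflect.TypeOf(Account{}), ext.ParseStructTags(true)),
		)
	}
	// In such an environment, `identity.Account{owner: 'bob'}` would compile
	// (qualified by the struct's Go package name), while the Go field name
	// `OwnerName` would not be a valid CEL field.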
+// valueFieldByName retrieves the corresponding reflect.Value field for the given field name, by
+// searching for a matching field tag value or field name.
+func valueFieldByName(parseStructTags bool, target reflect.Value, fieldName string) reflect.Value {
+	if !parseStructTags {
+		return target.FieldByName(fieldName)
+	}
+
+	for i := 0; i < target.Type().NumField(); i++ {
+		f := target.Type().Field(i)
+		if toFieldName(parseStructTags, f) == fieldName {
+			return target.FieldByIndex(f.Index)
+		}
+	}
+	return reflect.Value{}
+}
+
+// FindStructFieldType looks up a native type's field definition, and if the type name is not a native
+// type then proxies to the composed types.Provider.
+func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) {
+	t, found := tp.nativeTypes[typeName]
+	if !found {
+		return tp.baseProvider.FindStructFieldType(typeName, fieldName)
+	}
+	refField, isDefined := t.hasField(fieldName)
+	if !found || !isDefined {
+		return nil, false
+	}
+	celType, ok := convertToCelType(refField.Type)
+	if !ok {
+		return nil, false
+	}
+	return &types.FieldType{
+		Type: celType,
+		IsSet: func(obj any) bool {
+			refVal := reflect.Indirect(reflect.ValueOf(obj))
+			refField := valueFieldByName(tp.options.parseStructTags, refVal, fieldName)
+			return !refField.IsZero()
+		},
+		GetFrom: func(obj any) (any, error) {
+			refVal := reflect.Indirect(reflect.ValueOf(obj))
+			refField := valueFieldByName(tp.options.parseStructTags, refVal, fieldName)
+			return getFieldValue(refField), nil
+		},
+	}, true
+}
+
+// NewValue implements the ref.TypeProvider interface method.
+func (tp *nativeTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
+	t, found := tp.nativeTypes[typeName]
+	if !found {
+		return tp.baseProvider.NewValue(typeName, fields)
+	}
+	refPtr := reflect.New(t.refType)
+	refVal := refPtr.Elem()
+	for fieldName, val := range fields {
+		refFieldDef, isDefined := t.hasField(fieldName)
+		if !isDefined {
+			return types.NewErr("no such field: %s", fieldName)
+		}
+		fieldVal, err := val.ConvertToNative(refFieldDef.Type)
+		if err != nil {
+			return types.NewErr(err.Error())
+		}
+		refField := refVal.FieldByIndex(refFieldDef.Index)
+		refFieldVal := reflect.ValueOf(fieldVal)
+		refField.Set(refFieldVal)
+	}
+	return tp.NativeToValue(refPtr.Interface())
+}
+
+// NativeToValue adapts native values to CEL values and will proxy to the composed type adapter
+// for non-native types.
+func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val {
+	if val == nil {
+		return types.NullValue
+	}
+	if v, ok := val.(ref.Val); ok {
+		return v
+	}
+	rawVal := reflect.ValueOf(val)
+	refVal := rawVal
+	if refVal.Kind() == reflect.Ptr {
+		refVal = reflect.Indirect(refVal)
+	}
+	// This isn't quite right if you're also supporting proto,
+	// but maybe an acceptable limitation.
+	switch refVal.Kind() {
+	case reflect.Array, reflect.Slice:
+		switch val := val.(type) {
+		case []byte:
+			return tp.baseAdapter.NativeToValue(val)
+		default:
+			if refVal.Type().Elem() == reflect.TypeOf(byte(0)) {
+				return tp.baseAdapter.NativeToValue(val)
+			}
+			return types.NewDynamicList(tp, val)
+		}
+	case reflect.Map:
+		return types.NewDynamicMap(tp, val)
+	case reflect.Struct:
+		switch val := val.(type) {
+		case proto.Message, *pb.Map, protoreflect.List, protoreflect.Message, protoreflect.Value,
+			time.Time:
+			return tp.baseAdapter.NativeToValue(val)
+		default:
+			return tp.newNativeObject(val, rawVal)
+		}
+	default:
+		return tp.baseAdapter.NativeToValue(val)
+	}
+}
+
+func convertToCelType(refType reflect.Type) (*cel.Type, bool) {
+	switch refType.Kind() {
+	case reflect.Bool:
+		return cel.BoolType, true
+	case reflect.Float32, reflect.Float64:
+		return cel.DoubleType, true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if refType == durationType {
+			return cel.DurationType, true
+		}
+		return cel.IntType, true
+	case reflect.String:
+		return cel.StringType, true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return cel.UintType, true
+	case reflect.Array, reflect.Slice:
+		refElem := refType.Elem()
+		if refElem == reflect.TypeOf(byte(0)) {
+			return cel.BytesType, true
+		}
+		elemType, ok := convertToCelType(refElem)
+		if !ok {
+			return nil, false
+		}
+		return cel.ListType(elemType), true
+	case reflect.Map:
+		// Ensure the key type is an int, bool, uint, or string, and that the
+		// value type is also representable in CEL.
+		keyType, ok := convertToCelType(refType.Key())
+		if !ok {
+			return nil, false
+		}
+		elemType, ok := convertToCelType(refType.Elem())
+		if !ok {
+			return nil, false
+		}
+		return cel.MapType(keyType, elemType), true
+	case reflect.Struct:
+		if refType == timestampType {
+			return cel.TimestampType, true
+		}
+		return cel.ObjectType(
+			fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
+		), true
+	case reflect.Pointer:
+		if refType.Implements(pbMsgInterfaceType) {
+			pbMsg := reflect.New(refType.Elem()).Interface().(protoreflect.ProtoMessage)
+			return cel.ObjectType(string(pbMsg.ProtoReflect().Descriptor().FullName())), true
+		}
+		return convertToCelType(refType.Elem())
+	}
+	return nil, false
+}
+
+func (tp *nativeTypeProvider) newNativeObject(val any, refValue reflect.Value) ref.Val {
+	valType, err := newNativeType(tp.options.parseStructTags, refValue.Type())
+	if err != nil {
+		return types.NewErr(err.Error())
+	}
+	return &nativeObj{
+		Adapter:  tp,
+		val:      val,
+		valType:  valType,
+		refValue: refValue,
+	}
+}
+
+type nativeObj struct {
+	types.Adapter
+	val      any
+	valType  *nativeType
+	refValue reflect.Value
+}
+
+// ConvertToNative implements the ref.Val interface method.
+//
+// CEL does not have a notion of pointers, so whether a field is a pointer or value
+// is handled as part of this conversion step.
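+//
+// Editorial sketch (Person is a hypothetical native struct): a nativeObj wrapping
+// a Person value converts to either form:
+//
+//	obj.ConvertToNative(reflect.TypeOf(Person{}))  // returns a Person
+//	obj.ConvertToNative(reflect.TypeOf(&Person{})) // allocates and returns a *Person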
+func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	if o.refValue.Type() == typeDesc {
+		return o.val, nil
+	}
+	if o.refValue.Kind() == reflect.Pointer && o.refValue.Type().Elem() == typeDesc {
+		return o.refValue.Elem().Interface(), nil
+	}
+	if typeDesc.Kind() == reflect.Pointer && o.refValue.Type() == typeDesc.Elem() {
+		ptr := reflect.New(typeDesc.Elem())
+		ptr.Elem().Set(o.refValue)
+		return ptr.Interface(), nil
+	}
+	switch typeDesc {
+	case jsonValueType:
+		jsonStruct, err := o.ConvertToNative(jsonStructType)
+		if err != nil {
+			return nil, err
+		}
+		return structpb.NewStructValue(jsonStruct.(*structpb.Struct)), nil
+	case jsonStructType:
+		refVal := reflect.Indirect(o.refValue)
+		refType := refVal.Type()
+		fields := make(map[string]*structpb.Value, refVal.NumField())
+		for i := 0; i < refVal.NumField(); i++ {
+			fieldType := refType.Field(i)
+			fieldValue := refVal.Field(i)
+			if !fieldValue.IsValid() || fieldValue.IsZero() {
+				continue
+			}
+			fieldName := toFieldName(o.valType.parseStructTags, fieldType)
+			fieldCELVal := o.NativeToValue(fieldValue.Interface())
+			fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType)
+			if err != nil {
+				return nil, err
+			}
+			fields[fieldName] = fieldJSONVal.(*structpb.Value)
+		}
+		return &structpb.Struct{Fields: fields}, nil
+	}
+	return nil, fmt.Errorf("type conversion error from '%v' to '%v'", o.Type(), typeDesc)
+}
+
+// ConvertToType implements the ref.Val interface method.
+func (o *nativeObj) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case types.TypeType:
+		return o.valType
+	default:
+		if typeVal.TypeName() == o.valType.typeName {
+			return o
+		}
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", o.Type(), typeVal)
+}
+
+// Equal implements the ref.Val interface method.
+//
+// Note that in Go a pointer to a value is not equal to the value it contains.
+// In CEL, pointers and the values to which they point are equal.
+func (o *nativeObj) Equal(other ref.Val) ref.Val {
+	otherNtv, ok := other.(*nativeObj)
+	if !ok {
+		return types.False
+	}
+	val := o.val
+	otherVal := otherNtv.val
+	refVal := o.refValue
+	otherRefVal := otherNtv.refValue
+	if refVal.Kind() != otherRefVal.Kind() {
+		if refVal.Kind() == reflect.Pointer {
+			val = refVal.Elem().Interface()
+		} else if otherRefVal.Kind() == reflect.Pointer {
+			otherVal = otherRefVal.Elem().Interface()
+		}
+	}
+	return types.Bool(reflect.DeepEqual(val, otherVal))
+}
+
+// IsZeroValue indicates whether the contained Golang value is a zero value.
+//
+// Golang largely follows proto3 semantics for zero values.
+func (o *nativeObj) IsZeroValue() bool {
+	return reflect.Indirect(o.refValue).IsZero()
+}
+
+// IsSet tests whether a field which is defined is set to a non-default value.
+func (o *nativeObj) IsSet(field ref.Val) ref.Val {
+	refField, refErr := o.getReflectedField(field)
+	if refErr != nil {
+		return refErr
+	}
+	return types.Bool(!refField.IsZero())
+}
+
+// Get returns the value of a field name.
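+//
+// Editorial sketch: a CEL field access such as p.display_name (display_name being
+// a hypothetical field) ultimately resolves through Get(types.String("display_name"))
+// on the wrapped native object.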
+func (o *nativeObj) Get(field ref.Val) ref.Val { + refField, refErr := o.getReflectedField(field) + if refErr != nil { + return refErr + } + return adaptFieldValue(o, refField) +} + +func (o *nativeObj) getReflectedField(field ref.Val) (reflect.Value, ref.Val) { + fieldName, ok := field.(types.String) + if !ok { + return reflect.Value{}, types.MaybeNoSuchOverloadErr(field) + } + fieldNameStr := string(fieldName) + refField, isDefined := o.valType.hasField(fieldNameStr) + if !isDefined { + return reflect.Value{}, types.NewErr("no such field: %s", fieldName) + } + refVal := reflect.Indirect(o.refValue) + return refVal.FieldByIndex(refField.Index), nil +} + +// Type implements the ref.Val interface method. +func (o *nativeObj) Type() ref.Type { + return o.valType +} + +// Value implements the ref.Val interface method. +func (o *nativeObj) Value() any { + return o.val +} + +func newNativeTypes(parseStructTags bool, rawType reflect.Type) ([]*nativeType, error) { + nt, err := newNativeType(parseStructTags, rawType) + if err != nil { + return nil, err + } + result := []*nativeType{nt} + + alreadySeen := make(map[string]struct{}) + var iterateStructMembers func(reflect.Type) + iterateStructMembers = func(t reflect.Type) { + if k := t.Kind(); k == reflect.Pointer || k == reflect.Slice || k == reflect.Array || k == reflect.Map { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return + } + if _, seen := alreadySeen[t.String()]; seen { + return + } + alreadySeen[t.String()] = struct{}{} + nt, ntErr := newNativeType(parseStructTags, t) + if ntErr != nil { + err = ntErr + return + } + result = append(result, nt) + + for idx := 0; idx < t.NumField(); idx++ { + iterateStructMembers(t.Field(idx).Type) + } + } + iterateStructMembers(rawType) + + return result, err +} + +var ( + errDuplicatedFieldName = errors.New("field name already exists in struct") +) + +func newNativeType(parseStructTags bool, rawType reflect.Type) (*nativeType, error) { + refType := rawType + if refType.Kind() == reflect.Pointer { + refType = refType.Elem() + } + if !isValidObjectType(refType) { + return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType) + } + + // Since naming collisions can only happen with struct tag parsing, we only check for them if it is enabled. + if parseStructTags { + fieldNames := make(map[string]struct{}) + + for idx := 0; idx < refType.NumField(); idx++ { + field := refType.Field(idx) + fieldName := toFieldName(parseStructTags, field) + + if _, found := fieldNames[fieldName]; found { + return nil, fmt.Errorf("invalid field name `%s` in struct `%s`: %w", fieldName, refType.Name(), errDuplicatedFieldName) + } else { + fieldNames[fieldName] = struct{}{} + } + } + } + + return &nativeType{ + typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()), + refType: refType, + parseStructTags: parseStructTags, + }, nil +} + +type nativeType struct { + typeName string + refType reflect.Type + parseStructTags bool +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (t *nativeType) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion error for type to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. 
+func (t *nativeType) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case types.TypeType:
+		return types.TypeType
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", types.TypeType, typeVal)
+}
+
+// Equal returns true if both type names are equal to each other.
+func (t *nativeType) Equal(other ref.Val) ref.Val {
+	otherType, ok := other.(ref.Type)
+	return types.Bool(ok && t.TypeName() == otherType.TypeName())
+}
+
+// HasTrait implements the ref.Type interface method.
+func (t *nativeType) HasTrait(trait int) bool {
+	return nativeObjTraitMask&trait == trait
+}
+
+// String implements the fmt.Stringer interface method.
+func (t *nativeType) String() string {
+	return t.typeName
+}
+
+// Type implements the ref.Val interface method.
+func (t *nativeType) Type() ref.Type {
+	return types.TypeType
+}
+
+// TypeName implements the ref.Type interface method.
+func (t *nativeType) TypeName() string {
+	return t.typeName
+}
+
+// Value implements the ref.Val interface method.
+func (t *nativeType) Value() any {
+	return t.typeName
+}
+
+// fieldByName returns the corresponding reflect.StructField for the given name, either by
+// matching field tag or field name.
+func (t *nativeType) fieldByName(fieldName string) (reflect.StructField, bool) {
+	if !t.parseStructTags {
+		return t.refType.FieldByName(fieldName)
+	}
+
+	for i := 0; i < t.refType.NumField(); i++ {
+		f := t.refType.Field(i)
+		if toFieldName(t.parseStructTags, f) == fieldName {
+			return f, true
+		}
+	}
+
+	return reflect.StructField{}, false
+}
+
+// hasField returns whether a field name has a corresponding Golang reflect.StructField.
+func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) {
+	f, found := t.fieldByName(fieldName)
+	if !found || !f.IsExported() || !isSupportedType(f.Type) {
+		return reflect.StructField{}, false
+	}
+	return f, true
+}
+
+func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val {
+	return adapter.NativeToValue(getFieldValue(refField))
+}
+
+func getFieldValue(refField reflect.Value) any {
+	if refField.IsZero() {
+		switch refField.Kind() {
+		case reflect.Struct:
+			if refField.Type() == timestampType {
+				return time.Unix(0, 0)
+			}
+		case reflect.Pointer:
+			return reflect.New(refField.Type().Elem()).Interface()
+		}
+	}
+	return refField.Interface()
+}
+
+func simplePkgAlias(pkgPath string) string {
+	paths := strings.Split(pkgPath, "/")
+	if len(paths) == 0 {
+		return ""
+	}
+	return paths[len(paths)-1]
+}
+
+func isValidObjectType(refType reflect.Type) bool {
+	return refType.Kind() == reflect.Struct
+}
+
+func isSupportedType(refType reflect.Type) bool {
+	switch refType.Kind() {
+	case reflect.Chan, reflect.Complex64, reflect.Complex128, reflect.Func, reflect.UnsafePointer, reflect.Uintptr:
+		return false
+	case reflect.Array, reflect.Slice:
+		return isSupportedType(refType.Elem())
+	case reflect.Map:
+		return isSupportedType(refType.Key()) && isSupportedType(refType.Elem())
+	}
+	return true
+}
+
+var (
+	pbMsgInterfaceType = reflect.TypeOf((*protoreflect.ProtoMessage)(nil)).Elem()
+	timestampType      = reflect.TypeOf(time.Now())
+	durationType       = reflect.TypeOf(time.Nanosecond)
+)
diff --git a/vendor/github.com/google/cel-go/ext/protos.go b/vendor/github.com/google/cel-go/ext/protos.go
new file mode 100644
index 00000000..68796f60
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/protos.go
@@ -0,0 +1,140 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ext
+
+import (
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/ast"
+)
+
+// Protos returns a cel.EnvOption to configure extended macros and functions for
+// proto manipulation.
+//
+// Note, all macros use the 'proto' namespace; however, at the time of macro
+// expansion the namespace looks just like any other identifier. If you are
+// currently using a variable named 'proto', the macro will likely work just as
+// intended; however, there is some chance for collision.
+//
+// # Protos.GetExt
+//
+// Macro which generates a select expression that retrieves an extension field
+// from the input proto2 syntax message. If the field is not set, the default
+// value for the extension field is returned according to safe-traversal semantics.
+//
+//	proto.getExt(<msg>, <fully.qualified.extension.name>) -> <field-value>
+//
+// Examples:
+//
+//	proto.getExt(msg, google.expr.proto2.test.int32_ext) // returns int value
+//
+// # Protos.HasExt
+//
+// Macro which generates a test-only select expression that determines whether
+// an extension field is set on a proto2 syntax message.
+//
+//	proto.hasExt(<msg>, <fully.qualified.extension.name>) -> <bool>
+//
+// Examples:
+//
+//	proto.hasExt(msg, google.expr.proto2.test.int32_ext) // returns true || false
+func Protos() cel.EnvOption {
+	return cel.Lib(protoLib{})
+}
+
+var (
+	protoNamespace = "proto"
+	hasExtension   = "hasExt"
+	getExtension   = "getExt"
+)
+
+type protoLib struct{}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (protoLib) LibraryName() string {
+	return "cel.lib.ext.protos"
+}
+
+// CompileOptions implements the Library interface method.
+func (protoLib) CompileOptions() []cel.EnvOption {
+	return []cel.EnvOption{
+		cel.Macros(
+			// proto.getExt(msg, select_expression)
+			cel.ReceiverMacro(getExtension, 2, getProtoExt),
+			// proto.hasExt(msg, select_expression)
+			cel.ReceiverMacro(hasExtension, 2, hasProtoExt),
+		),
+	}
+}
+
+// ProgramOptions implements the Library interface method.
+func (protoLib) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+// hasProtoExt generates a test-only select expression for a fully-qualified extension name on a protobuf message.
+func hasProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) {
+	if !macroTargetMatchesNamespace(protoNamespace, target) {
+		return nil, nil
+	}
+	extensionField, err := getExtFieldName(mef, args[1])
+	if err != nil {
+		return nil, err
+	}
+	return mef.NewPresenceTest(args[0], extensionField), nil
+}
+
+// getProtoExt generates a select expression for a fully-qualified extension name on a protobuf message.
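+//
+// Editorial sketch: proto.getExt(msg, google.expr.proto2.test.int32_ext) expands
+// to a single select expression whose field name is the fully-qualified string
+// "google.expr.proto2.test.int32_ext", i.e. the macro effectively emits
+//
+//	mef.NewSelect(args[0], "google.expr.proto2.test.int32_ext")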
+func getProtoExt(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *cel.Error) { + if !macroTargetMatchesNamespace(protoNamespace, target) { + return nil, nil + } + extFieldName, err := getExtFieldName(mef, args[1]) + if err != nil { + return nil, err + } + return mef.NewSelect(args[0], extFieldName), nil +} + +func getExtFieldName(mef cel.MacroExprFactory, expr ast.Expr) (string, *cel.Error) { + isValid := false + extensionField := "" + switch expr.Kind() { + case ast.SelectKind: + extensionField, isValid = validateIdentifier(expr) + } + if !isValid { + return "", mef.NewError(expr.ID(), "invalid extension field") + } + return extensionField, nil +} + +func validateIdentifier(expr ast.Expr) (string, bool) { + switch expr.Kind() { + case ast.IdentKind: + return expr.AsIdent(), true + case ast.SelectKind: + sel := expr.AsSelect() + if sel.IsTestOnly() { + return "", false + } + opStr, isIdent := validateIdentifier(sel.Operand()) + if !isIdent { + return "", false + } + return opStr + "." + sel.FieldName(), true + default: + return "", false + } +} diff --git a/vendor/github.com/google/cel-go/ext/sets.go b/vendor/github.com/google/cel-go/ext/sets.go new file mode 100644 index 00000000..7e941665 --- /dev/null +++ b/vendor/github.com/google/cel-go/ext/sets.go @@ -0,0 +1,261 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ext + +import ( + "math" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" + "github.com/google/cel-go/interpreter" +) + +// Sets returns a cel.EnvOption to configure namespaced set relationship +// functions. +// +// There is no set type within CEL, and while one may be introduced in the +// future, there are cases where a `list` type is known to behave like a set. +// For such cases, this library provides some basic functionality for +// determining set containment, equivalence, and intersection. +// +// # Sets.Contains +// +// Returns whether the first list argument contains all elements in the second +// list argument. The list may contain elements of any type and standard CEL +// equality is used to determine whether a value exists in both lists. If the +// second list is empty, the result will always return true. +// +// sets.contains(list(T), list(T)) -> bool +// +// Examples: +// +// sets.contains([], []) // true +// sets.contains([], [1]) // false +// sets.contains([1, 2, 3, 4], [2, 3]) // true +// sets.contains([1, 2.0, 3u], [1.0, 2u, 3]) // true +// +// # Sets.Equivalent +// +// Returns whether the first and second list are set equivalent. Lists are set +// equivalent if for every item in the first list, there is an element in the +// second which is equal. 
The lists may not be of the same size as they do not +// guarantee the elements within them are unique, so size does not factor into +// the computation. +// +// Examples: +// +// sets.equivalent([], []) // true +// sets.equivalent([1], [1, 1]) // true +// sets.equivalent([1], [1u, 1.0]) // true +// sets.equivalent([1, 2, 3], [3u, 2.0, 1]) // true +// +// # Sets.Intersects +// +// Returns whether the first list has at least one element whose value is equal +// to an element in the second list. If either list is empty, the result will +// be false. +// +// Examples: +// +// sets.intersects([1], []) // false +// sets.intersects([1], [1, 2]) // true +// sets.intersects([[1], [2, 3]], [[1, 2], [2, 3.0]]) // true +func Sets() cel.EnvOption { + return cel.Lib(setsLib{}) +} + +type setsLib struct{} + +// LibraryName implements the SingletonLibrary interface method. +func (setsLib) LibraryName() string { + return "cel.lib.ext.sets" +} + +// CompileOptions implements the Library interface method. +func (setsLib) CompileOptions() []cel.EnvOption { + listType := cel.ListType(cel.TypeParamType("T")) + return []cel.EnvOption{ + cel.Function("sets.contains", + cel.Overload("list_sets_contains_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsContains))), + cel.Function("sets.equivalent", + cel.Overload("list_sets_equivalent_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsEquivalent))), + cel.Function("sets.intersects", + cel.Overload("list_sets_intersects_list", []*cel.Type{listType, listType}, cel.BoolType, + cel.BinaryBinding(setsIntersects))), + cel.CostEstimatorOptions( + checker.OverloadCostEstimate("list_sets_contains_list", estimateSetsCost(1)), + checker.OverloadCostEstimate("list_sets_intersects_list", estimateSetsCost(1)), + // equivalence requires potentially two m*n comparisons to ensure each list is contained by the other + checker.OverloadCostEstimate("list_sets_equivalent_list", estimateSetsCost(2)), + ), + } +} + +// ProgramOptions implements the Library interface method. +func (setsLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{ + cel.CostTrackerOptions( + interpreter.OverloadCostTracker("list_sets_contains_list", trackSetsCost(1)), + interpreter.OverloadCostTracker("list_sets_intersects_list", trackSetsCost(1)), + interpreter.OverloadCostTracker("list_sets_equivalent_list", trackSetsCost(2)), + ), + } +} + +// NewSetMembershipOptimizer rewrites set membership tests using the `in` operator against a list +// of constant values of enum, int, uint, string, or boolean type into a set membership test against +// a map where the map keys are the elements of the list. 
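+//
+// Editorial sketch: the optimizer rewrites the list operand of a membership test
+// such as
+//
+//	x in ['a', 'b', 'c']
+//
+// into a map whose keys are the former list elements:
+//
+//	x in {'a': true, 'b': true, 'c': true}
+//
+// turning the linear scan into a keyed lookup.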
+func NewSetMembershipOptimizer() (cel.ASTOptimizer, error) { + return setsLib{}, nil +} + +func (setsLib) Optimize(ctx *cel.OptimizerContext, a *ast.AST) *ast.AST { + root := ast.NavigateAST(a) + matches := ast.MatchDescendants(root, matchInConstantList(a)) + for _, match := range matches { + call := match.AsCall() + listArg := call.Args()[1] + entries := make([]ast.EntryExpr, len(listArg.AsList().Elements())) + for i, elem := range listArg.AsList().Elements() { + var entry ast.EntryExpr + if r, found := a.ReferenceMap()[elem.ID()]; found && r.Value != nil { + entry = ctx.NewMapEntry(ctx.NewLiteral(r.Value), ctx.NewLiteral(types.True), false) + } else { + entry = ctx.NewMapEntry(elem, ctx.NewLiteral(types.True), false) + } + entries[i] = entry + } + mapArg := ctx.NewMap(entries) + ctx.UpdateExpr(listArg, mapArg) + } + return a +} + +func matchInConstantList(a *ast.AST) ast.ExprMatcher { + return func(e ast.NavigableExpr) bool { + if e.Kind() != ast.CallKind { + return false + } + call := e.AsCall() + if call.FunctionName() != operators.In { + return false + } + aggregateVal := call.Args()[1] + if aggregateVal.Kind() != ast.ListKind { + return false + } + listVal := aggregateVal.AsList() + for _, elem := range listVal.Elements() { + if r, found := a.ReferenceMap()[elem.ID()]; found { + if r.Value != nil { + continue + } + } + if elem.Kind() != ast.LiteralKind { + return false + } + lit := elem.AsLiteral() + if !(lit.Type() == cel.StringType || lit.Type() == cel.IntType || + lit.Type() == cel.UintType || lit.Type() == cel.BoolType) { + return false + } + } + return true + } +} + +func setsIntersects(listA, listB ref.Val) ref.Val { + lA := listA.(traits.Lister) + lB := listB.(traits.Lister) + it := lA.Iterator() + for it.HasNext() == types.True { + exists := lB.Contains(it.Next()) + if exists == types.True { + return types.True + } + } + return types.False +} + +func setsContains(list, sublist ref.Val) ref.Val { + l := list.(traits.Lister) + sub := sublist.(traits.Lister) + it := sub.Iterator() + for it.HasNext() == types.True { + exists := l.Contains(it.Next()) + if exists != types.True { + return exists + } + } + return types.True +} + +func setsEquivalent(listA, listB ref.Val) ref.Val { + aContainsB := setsContains(listA, listB) + if aContainsB != types.True { + return aContainsB + } + return setsContains(listB, listA) +} + +func estimateSetsCost(costFactor float64) checker.FunctionEstimator { + return func(estimator checker.CostEstimator, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate { + if len(args) == 2 { + arg0Size := estimateSize(estimator, args[0]) + arg1Size := estimateSize(estimator, args[1]) + costEstimate := arg0Size.Multiply(arg1Size).MultiplyByCostFactor(costFactor).Add(callCostEstimate) + return &checker.CallEstimate{CostEstimate: costEstimate} + } + return nil + } +} + +func estimateSize(estimator checker.CostEstimator, node checker.AstNode) checker.SizeEstimate { + if l := node.ComputedSize(); l != nil { + return *l + } + if l := estimator.EstimateSize(node); l != nil { + return *l + } + return checker.SizeEstimate{Min: 0, Max: math.MaxUint64} +} + +func trackSetsCost(costFactor float64) interpreter.FunctionTracker { + return func(args []ref.Val, _ ref.Val) *uint64 { + lhsSize := actualSize(args[0]) + rhsSize := actualSize(args[1]) + cost := callCost + uint64(float64(lhsSize*rhsSize)*costFactor) + return &cost + } +} + +func actualSize(value ref.Val) uint64 { + if sz, ok := value.(traits.Sizer); ok { + return uint64(sz.Size().(types.Int)) + } + 
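+	// Values that do not implement traits.Sizer are costed as a single element.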
+	return 1
+}
+
+var (
+	callCostEstimate = checker.CostEstimate{Min: 1, Max: 1}
+	callCost         = uint64(1)
+)
diff --git a/vendor/github.com/google/cel-go/ext/strings.go b/vendor/github.com/google/cel-go/ext/strings.go
new file mode 100644
index 00000000..2e20f1e4
--- /dev/null
+++ b/vendor/github.com/google/cel-go/ext/strings.go
@@ -0,0 +1,765 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ext contains CEL extension libraries where each library defines a related set of
+// constants, functions, macros, or other configuration settings which may not be covered by
+// the core CEL spec.
+package ext
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/text/language"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+const (
+	defaultLocale    = "en-US"
+	defaultPrecision = 6
+)
+
+// Strings returns a cel.EnvOption to configure extended functions for string manipulation.
+// As a general note, all indices are zero-based.
+//
+// # CharAt
+//
+// Returns the character at the given position. If the position is negative, or greater than
+// the length of the string, the function will produce an error:
+//
+//	<string>.charAt(<int>) -> <string>
+//
+// Examples:
+//
+//	'hello'.charAt(4)  // return 'o'
+//	'hello'.charAt(5)  // return ''
+//	'hello'.charAt(-1) // error
+//
+// # Format
+//
+// Introduced at version: 1
+//
+// Returns a new string with substitutions being performed, printf-style.
+// The valid formatting clauses are:
+//
+// `%s` - substitutes a string. This can also be used on bools, lists, maps, bytes,
+// Duration and Timestamp, in addition to all numerical types (int, uint, and double).
+// Note that the dot/period decimal separator will always be used when printing a list
+// or map that contains a double, and that null can be passed (which results in the
+// string "null") in addition to types.
+// `%d` - substitutes an integer.
+// `%f` - substitutes a double with fixed-point precision. The default precision is 6, but
+// this can be adjusted. The strings `Infinity`, `-Infinity`, and `NaN` are also valid input
+// for this clause.
+// `%e` - substitutes a double in scientific notation. The default precision is 6, but this
+// can be adjusted.
+// `%b` - substitutes an integer with its equivalent binary string. Can also be used on bools.
+// `%x` - substitutes an integer with its equivalent in hexadecimal, or if given a string or
+// bytes, will output each character's equivalent in hexadecimal.
+// `%X` - same as above, but with A-F capitalized.
+// `%o` - substitutes an integer with its equivalent in octal.
+//
+//	<string>.format(<list>) -> <string>
+//
+// Examples:
+//
+//	"this is a string: %s\nand an integer: %d".format(["str", 42]) // returns "this is a string: str\nand an integer: 42"
+//	"a double substituted with %%s: %s".format([64.2]) // returns "a double substituted with %s: 64.2"
+//	"string type: %s".format([type(string)]) // returns "string type: string"
+//	"timestamp: %s".format([timestamp("2023-02-03T23:31:20+00:00")]) // returns "timestamp: 2023-02-03T23:31:20Z"
+//	"duration: %s".format([duration("1h45m47s")]) // returns "duration: 6347s"
+//	"%f".format([3.14]) // returns "3.140000"
+//	"scientific notation: %e".format([2.71828]) // returns "scientific notation: 2.718280\u202f\u00d7\u202f10\u2070\u2070"
+//	"5 in binary: %b".format([5]), // returns "5 in binary: 101"
+//	"26 in hex: %x".format([26]), // returns "26 in hex: 1a"
+//	"26 in hex (uppercase): %X".format([26]) // returns "26 in hex (uppercase): 1A"
+//	"30 in octal: %o".format([30]) // returns "30 in octal: 36"
+//	"a map inside a list: %s".format([[1, 2, 3, {"a": "x", "b": "y", "c": "z"}]]) // returns "a map inside a list: [1, 2, 3, {"a":"x", "b":"y", "c":"z"}]"
+//	"true bool: %s - false bool: %s\nbinary bool: %b".format([true, false, true]) // returns "true bool: true - false bool: false\nbinary bool: 1"
+//
+// Passing an incorrect type (a string to `%b`) is considered an error, as well as attempting
+// to use more formatting clauses than there are arguments (`%d %d %d` while passing two ints, for instance).
+// If compile-time checking is enabled, and the formatting string is a constant, and the argument list is a literal,
+// then letting any arguments go unused/unformatted is also considered an error.
+//
+// # IndexOf
+//
+// Returns the integer index of the first occurrence of the search string. If the search string is
+// not found the function returns -1.
+//
+// The function also accepts an optional position from which to begin the substring search. If the
+// substring is the empty string, the index where the search starts is returned (zero or custom).
+//
+//	<string>.indexOf(<string>) -> <int>
+//	<string>.indexOf(<string>, <int>) -> <int>
+//
+// Examples:
+//
+//	'hello mellow'.indexOf('') // returns 0
+//	'hello mellow'.indexOf('ello') // returns 1
+//	'hello mellow'.indexOf('jello') // returns -1
+//	'hello mellow'.indexOf('', 2) // returns 2
+//	'hello mellow'.indexOf('ello', 2) // returns 7
+//	'hello mellow'.indexOf('ello', 20) // error
+//
+// # Join
+//
+// Returns a new string where the elements of string list are concatenated.
+//
+// The function also accepts an optional separator which is placed between elements in the resulting string.
+//
+//	<list<string>>.join() -> <string>
+//	<list<string>>.join(<string>) -> <string>
+//
+// Examples:
+//
+//	['hello', 'mellow'].join() // returns 'hellomellow'
+//	['hello', 'mellow'].join(' ') // returns 'hello mellow'
+//	[].join() // returns ''
+//	[].join('/') // returns ''
+//
+// # LastIndexOf
+//
+// Returns the integer index at the start of the last occurrence of the search string. If the
+// search string is not found the function returns -1.
+//
+// The function also accepts an optional position which represents the last index to be
+// considered as the beginning of the substring match. If the substring is the empty string,
+// the index where the search starts is returned (string length or custom).
+//
+//	<string>.lastIndexOf(<string>) -> <int>
+//	<string>.lastIndexOf(<string>, <int>) -> <int>
+//
+// Examples:
+//
+//	'hello mellow'.lastIndexOf('') // returns 12
+//	'hello mellow'.lastIndexOf('ello') // returns 7
+//	'hello mellow'.lastIndexOf('jello') // returns -1
+//	'hello mellow'.lastIndexOf('ello', 6) // returns 1
+//	'hello mellow'.lastIndexOf('ello', -1) // error
+//
+// # LowerAscii
+//
+// Returns a new string where all ASCII characters are lower-cased.
+//
+// This function does not perform Unicode case-mapping for characters outside the ASCII range.
+//
+//	<string>.lowerAscii() -> <string>
+//
+// Examples:
+//
+//	'TacoCat'.lowerAscii() // returns 'tacocat'
+//	'TacoCÆt Xii'.lowerAscii() // returns 'tacocÆt xii'
+//
+// # Strings.Quote
+//
+// Introduced in version: 1
+//
+// Takes the given string and makes it safe to print (without any formatting due to escape sequences).
+// If any invalid UTF-8 characters are encountered, they are replaced with \uFFFD.
+//
+//	strings.quote(<string>)
+//
+// Examples:
+//
+//	strings.quote('single-quote with "double quote"') // returns '"single-quote with \"double quote\""'
+//	strings.quote("two escape sequences \a\n") // returns '"two escape sequences \\a\\n"'
+//
+// # Replace
+//
+// Returns a new string based on the target, which replaces the occurrences of a search string
+// with a replacement string if present. The function accepts an optional limit on the number of
+// substring replacements to be made.
+//
+// When the replacement limit is 0, the result is the original string. When the limit is a negative
+// number, the function behaves the same as replace all.
+//
+//	<string>.replace(<string>, <string>) -> <string>
+//	<string>.replace(<string>, <string>, <int>) -> <string>
+//
+// Examples:
+//
+//	'hello hello'.replace('he', 'we') // returns 'wello wello'
+//	'hello hello'.replace('he', 'we', -1) // returns 'wello wello'
+//	'hello hello'.replace('he', 'we', 1) // returns 'wello hello'
+//	'hello hello'.replace('he', 'we', 0) // returns 'hello hello'
+//	'hello hello'.replace('', '_') // returns '_h_e_l_l_o_ _h_e_l_l_o_'
+//	'hello hello'.replace('h', '') // returns 'ello ello'
+//
+// # Split
+//
+// Returns a list of strings split from the input by the given separator. The function accepts
+// an optional argument specifying a limit on the number of substrings produced by the split.
+//
+// When the split limit is 0, the result is an empty list. When the limit is 1, the result is the
+// target string to split. When the limit is a negative number, the function behaves the same as
+// split all.
+//
+//	<string>.split(<string>) -> <list<string>>
+//	<string>.split(<string>, <int>) -> <list<string>>
+//
+// Examples:
+//
+//	'hello hello hello'.split(' ') // returns ['hello', 'hello', 'hello']
+//	'hello hello hello'.split(' ', 0) // returns []
+//	'hello hello hello'.split(' ', 1) // returns ['hello hello hello']
+//	'hello hello hello'.split(' ', 2) // returns ['hello', 'hello hello']
+//	'hello hello hello'.split(' ', -1) // returns ['hello', 'hello', 'hello']
+//
+// # Substring
+//
+// Returns the substring given a numeric range corresponding to character positions. Optionally
+// may omit the trailing range for a substring from a given character position until the end of
+// a string.
+//
+// Character offsets are 0-based with an inclusive start range and exclusive end range. It is an
+// error to specify an end range that is lower than the start range, or for either the start or end
+// index to be negative or exceed the string length.
+//
+//	<string>.substring(<int>) -> <string>
+//	<string>.substring(<int>, <int>) -> <string>
+//
+// Examples:
+//
+//	'tacocat'.substring(4) // returns 'cat'
+//	'tacocat'.substring(0, 4) // returns 'taco'
+//	'tacocat'.substring(-1) // error
+//	'tacocat'.substring(2, 1) // error
+//
+// # Trim
+//
+// Returns a new string which removes the leading and trailing whitespace in the target string.
+// The trim function uses the Unicode definition of whitespace which does not include the
+// zero-width spaces. See: https://en.wikipedia.org/wiki/Whitespace_character#Unicode
+//
+//	<string>.trim() -> <string>
+//
+// Examples:
+//
+//	' \ttrim\n '.trim() // returns 'trim'
+//
+// # UpperAscii
+//
+// Returns a new string where all ASCII characters are upper-cased.
+//
+// This function does not perform Unicode case-mapping for characters outside the ASCII range.
+//
+//	<string>.upperAscii() -> <string>
+//
+// Examples:
+//
+//	'TacoCat'.upperAscii() // returns 'TACOCAT'
+//	'TacoCÆt Xii'.upperAscii() // returns 'TACOCÆT XII'
+//
+// # Reverse
+//
+// Introduced at version: 3
+//
+// Returns a new string whose characters are the same as the target string, only formatted in
+// reverse order. This function relies on converting strings to rune arrays in order to reverse them.
+//
+//	<string>.reverse() -> <string>
+//
+// Examples:
+//
+//	'gums'.reverse() // returns 'smug'
+//	'John Smith'.reverse() // returns 'htimS nhoJ'
+func Strings(options ...StringsOption) cel.EnvOption {
+	s := &stringLib{
+		version:        math.MaxUint32,
+		validateFormat: true,
+	}
+	for _, o := range options {
+		s = o(s)
+	}
+	return cel.Lib(s)
+}
+
+type stringLib struct {
+	locale         string
+	version        uint32
+	validateFormat bool
+}
+
+// LibraryName implements the SingletonLibrary interface method.
+func (*stringLib) LibraryName() string {
+	return "cel.lib.ext.strings"
+}
+
+// StringsOption is a functional interface for configuring the strings library.
+type StringsOption func(*stringLib) *stringLib
+
+// StringsLocale configures the library with the given locale. The locale tag will
+// be checked for validity at the time that EnvOptions are configured. If this option
+// is not passed, string.format will behave as if en_US was passed as the locale.
+func StringsLocale(locale string) StringsOption {
+	return func(sl *stringLib) *stringLib {
+		sl.locale = locale
+		return sl
+	}
+}
+
+// StringsVersion configures the version of the string library.
+//
+// The version limits which functions are available. Only functions introduced
+// at or below the given version are included in the library. If this option
+// is not set, all functions are available.
+//
+// See the library documentation to determine which version a function was introduced.
+// If the documentation does not state which version a function was introduced, it can
+// be assumed to be introduced at version 0, when the library was first created.
+func StringsVersion(version uint32) StringsOption {
+	return func(lib *stringLib) *stringLib {
+		lib.version = version
+		return lib
+	}
+}
+
+// StringsValidateFormatCalls validates type-checked ASTs to ensure that string.format() calls have
+// valid formatting clauses and valid argument types for each clause.
+//
+// Enabled by default.
+func StringsValidateFormatCalls(value bool) StringsOption {
+	return func(s *stringLib) *stringLib {
+		s.validateFormat = value
+		return s
+	}
+}
+
+// CompileOptions implements the Library interface method.
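+//
+// Editorial sketch: the declarations emitted below depend on the configured
+// version, e.g.
+//
+//	env, err := cel.NewEnv(Strings(StringsVersion(1)))
+//
+// declares the base string functions plus format and strings.quote, keeps the
+// pre-version-2 join implementation, and omits reverse (introduced at version 3).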
+func (lib *stringLib) CompileOptions() []cel.EnvOption { + formatLocale := "en_US" + if lib.locale != "" { + // ensure locale is properly-formed if set + _, err := language.Parse(lib.locale) + if err != nil { + return []cel.EnvOption{ + func(e *cel.Env) (*cel.Env, error) { + return nil, fmt.Errorf("failed to parse locale: %w", err) + }, + } + } + formatLocale = lib.locale + } + + opts := []cel.EnvOption{ + cel.Function("charAt", + cel.MemberOverload("string_char_at_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType, + cel.BinaryBinding(func(str, ind ref.Val) ref.Val { + s := str.(types.String) + i := ind.(types.Int) + return stringOrError(charAt(string(s), int64(i))) + }))), + cel.Function("indexOf", + cel.MemberOverload("string_index_of_string", []*cel.Type{cel.StringType, cel.StringType}, cel.IntType, + cel.BinaryBinding(func(str, substr ref.Val) ref.Val { + s := str.(types.String) + sub := substr.(types.String) + return intOrError(indexOf(string(s), string(sub))) + })), + cel.MemberOverload("string_index_of_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.IntType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + sub := args[1].(types.String) + offset := args[2].(types.Int) + return intOrError(indexOfOffset(string(s), string(sub), int64(offset))) + }))), + cel.Function("lastIndexOf", + cel.MemberOverload("string_last_index_of_string", []*cel.Type{cel.StringType, cel.StringType}, cel.IntType, + cel.BinaryBinding(func(str, substr ref.Val) ref.Val { + s := str.(types.String) + sub := substr.(types.String) + return intOrError(lastIndexOf(string(s), string(sub))) + })), + cel.MemberOverload("string_last_index_of_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.IntType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + sub := args[1].(types.String) + offset := args[2].(types.Int) + return intOrError(lastIndexOfOffset(string(s), string(sub), int64(offset))) + }))), + cel.Function("lowerAscii", + cel.MemberOverload("string_lower_ascii", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(lowerASCII(string(s))) + }))), + cel.Function("replace", + cel.MemberOverload( + "string_replace_string_string", []*cel.Type{cel.StringType, cel.StringType, cel.StringType}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + str := args[0].(types.String) + old := args[1].(types.String) + new := args[2].(types.String) + return stringOrError(replace(string(str), string(old), string(new))) + })), + cel.MemberOverload( + "string_replace_string_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.StringType, cel.IntType}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + str := args[0].(types.String) + old := args[1].(types.String) + new := args[2].(types.String) + n := args[3].(types.Int) + return stringOrError(replaceN(string(str), string(old), string(new), int64(n))) + }))), + cel.Function("split", + cel.MemberOverload("string_split_string", []*cel.Type{cel.StringType, cel.StringType}, cel.ListType(cel.StringType), + cel.BinaryBinding(func(str, separator ref.Val) ref.Val { + s := str.(types.String) + sep := separator.(types.String) + return listStringOrError(split(string(s), string(sep))) + })), + cel.MemberOverload("string_split_string_int", []*cel.Type{cel.StringType, cel.StringType, cel.IntType}, cel.ListType(cel.StringType), + 
cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + sep := args[1].(types.String) + n := args[2].(types.Int) + return listStringOrError(splitN(string(s), string(sep), int64(n))) + }))), + cel.Function("substring", + cel.MemberOverload("string_substring_int", []*cel.Type{cel.StringType, cel.IntType}, cel.StringType, + cel.BinaryBinding(func(str, offset ref.Val) ref.Val { + s := str.(types.String) + off := offset.(types.Int) + return stringOrError(substr(string(s), int64(off))) + })), + cel.MemberOverload("string_substring_int_int", []*cel.Type{cel.StringType, cel.IntType, cel.IntType}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := args[0].(types.String) + start := args[1].(types.Int) + end := args[2].(types.Int) + return stringOrError(substrRange(string(s), int64(start), int64(end))) + }))), + cel.Function("trim", + cel.MemberOverload("string_trim", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(trimSpace(string(s))) + }))), + cel.Function("upperAscii", + cel.MemberOverload("string_upper_ascii", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(upperASCII(string(s))) + }))), + } + if lib.version >= 1 { + opts = append(opts, cel.Function("format", + cel.MemberOverload("string_format", []*cel.Type{cel.StringType, cel.ListType(cel.DynType)}, cel.StringType, + cel.FunctionBinding(func(args ...ref.Val) ref.Val { + s := string(args[0].(types.String)) + formatArgs := args[1].(traits.Lister) + return stringOrError(parseFormatString(s, &stringFormatter{}, &stringArgList{formatArgs}, formatLocale)) + }))), + cel.Function("strings.quote", cel.Overload("strings_quote", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) ref.Val { + s := str.(types.String) + return stringOrError(quote(string(s))) + }))), + + cel.ASTValidators(stringFormatValidator{})) + + } + if lib.version >= 2 { + opts = append(opts, + cel.Function("join", + cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + l := list.(traits.Lister) + return stringOrError(joinValSeparator(l, "")) + })), + cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, + cel.BinaryBinding(func(list, delim ref.Val) ref.Val { + l := list.(traits.Lister) + d := delim.(types.String) + return stringOrError(joinValSeparator(l, string(d))) + }))), + ) + } else { + opts = append(opts, + cel.Function("join", + cel.MemberOverload("list_join", []*cel.Type{cel.ListType(cel.StringType)}, cel.StringType, + cel.UnaryBinding(func(list ref.Val) ref.Val { + l, err := list.ConvertToNative(stringListType) + if err != nil { + return types.WrapErr(err) + } + return stringOrError(join(l.([]string))) + })), + cel.MemberOverload("list_join_string", []*cel.Type{cel.ListType(cel.StringType), cel.StringType}, cel.StringType, + cel.BinaryBinding(func(list, delim ref.Val) ref.Val { + l, err := list.ConvertToNative(stringListType) + if err != nil { + return types.WrapErr(err) + } + d := delim.(types.String) + return stringOrError(joinSeparator(l.([]string), string(d))) + }))), + ) + } + if lib.version >= 3 { + opts = append(opts, + cel.Function("reverse", + cel.MemberOverload("reverse", []*cel.Type{cel.StringType}, cel.StringType, + cel.UnaryBinding(func(str ref.Val) 
ref.Val { + s := str.(types.String) + return stringOrError(reverse(string(s))) + }))), + ) + } + if lib.validateFormat { + opts = append(opts, cel.ASTValidators(stringFormatValidator{})) + } + return opts +} + +// ProgramOptions implements the Library interface method. +func (*stringLib) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func charAt(str string, ind int64) (string, error) { + i := int(ind) + runes := []rune(str) + if i < 0 || i > len(runes) { + return "", fmt.Errorf("index out of range: %d", ind) + } + if i == len(runes) { + return "", nil + } + return string(runes[i]), nil +} + +func indexOf(str, substr string) (int64, error) { + return indexOfOffset(str, substr, int64(0)) +} + +func indexOfOffset(str, substr string, offset int64) (int64, error) { + if substr == "" { + return offset, nil + } + off := int(offset) + runes := []rune(str) + subrunes := []rune(substr) + if off < 0 || off >= len(runes) { + return -1, fmt.Errorf("index out of range: %d", off) + } + for i := off; i < len(runes)-(len(subrunes)-1); i++ { + found := true + for j := 0; j < len(subrunes); j++ { + if runes[i+j] != subrunes[j] { + found = false + break + } + } + if found { + return int64(i), nil + } + } + return -1, nil +} + +func lastIndexOf(str, substr string) (int64, error) { + runes := []rune(str) + if substr == "" { + return int64(len(runes)), nil + } + return lastIndexOfOffset(str, substr, int64(len(runes)-1)) +} + +func lastIndexOfOffset(str, substr string, offset int64) (int64, error) { + if substr == "" { + return offset, nil + } + off := int(offset) + runes := []rune(str) + subrunes := []rune(substr) + if off < 0 || off >= len(runes) { + return -1, fmt.Errorf("index out of range: %d", off) + } + if off > len(runes)-len(subrunes) { + off = len(runes) - len(subrunes) + } + for i := off; i >= 0; i-- { + found := true + for j := 0; j < len(subrunes); j++ { + if runes[i+j] != subrunes[j] { + found = false + break + } + } + if found { + return int64(i), nil + } + } + return -1, nil +} + +func lowerASCII(str string) (string, error) { + runes := []rune(str) + for i, r := range runes { + if r <= unicode.MaxASCII { + r = unicode.ToLower(r) + runes[i] = r + } + } + return string(runes), nil +} + +func replace(str, old, new string) (string, error) { + return strings.ReplaceAll(str, old, new), nil +} + +func replaceN(str, old, new string, n int64) (string, error) { + return strings.Replace(str, old, new, int(n)), nil +} + +func split(str, sep string) ([]string, error) { + return strings.Split(str, sep), nil +} + +func splitN(str, sep string, n int64) ([]string, error) { + return strings.SplitN(str, sep, int(n)), nil +} + +func substr(str string, start int64) (string, error) { + runes := []rune(str) + if int(start) < 0 || int(start) > len(runes) { + return "", fmt.Errorf("index out of range: %d", start) + } + return string(runes[start:]), nil +} + +func substrRange(str string, start, end int64) (string, error) { + runes := []rune(str) + l := len(runes) + if start > end { + return "", fmt.Errorf("invalid substring range. 
start: %d, end: %d", start, end) + } + if int(start) < 0 || int(start) > l { + return "", fmt.Errorf("index out of range: %d", start) + } + if int(end) < 0 || int(end) > l { + return "", fmt.Errorf("index out of range: %d", end) + } + return string(runes[int(start):int(end)]), nil +} + +func trimSpace(str string) (string, error) { + return strings.TrimSpace(str), nil +} + +func upperASCII(str string) (string, error) { + runes := []rune(str) + for i, r := range runes { + if r <= unicode.MaxASCII { + r = unicode.ToUpper(r) + runes[i] = r + } + } + return string(runes), nil +} + +func reverse(str string) (string, error) { + chars := []rune(str) + for i, j := 0, len(chars)-1; i < j; i, j = i+1, j-1 { + chars[i], chars[j] = chars[j], chars[i] + } + return string(chars), nil +} + +func joinSeparator(strs []string, separator string) (string, error) { + return strings.Join(strs, separator), nil +} + +func join(strs []string) (string, error) { + return strings.Join(strs, ""), nil +} + +func joinValSeparator(strs traits.Lister, separator string) (string, error) { + sz := strs.Size().(types.Int) + var sb strings.Builder + for i := types.Int(0); i < sz; i++ { + if i != 0 { + sb.WriteString(separator) + } + elem := strs.Get(i) + str, ok := elem.(types.String) + if !ok { + return "", fmt.Errorf("join: invalid input: %v", elem) + } + sb.WriteString(string(str)) + } + return sb.String(), nil +} + +// quote implements a string quoting function. The string will be wrapped in +// double quotes, and all valid CEL escape sequences will be escaped to show up +// literally if printed. If the input contains any invalid UTF-8, the invalid runes +// will be replaced with utf8.RuneError. +func quote(s string) (string, error) { + var quotedStrBuilder strings.Builder + for _, c := range sanitize(s) { + switch c { + case '\a': + quotedStrBuilder.WriteString("\\a") + case '\b': + quotedStrBuilder.WriteString("\\b") + case '\f': + quotedStrBuilder.WriteString("\\f") + case '\n': + quotedStrBuilder.WriteString("\\n") + case '\r': + quotedStrBuilder.WriteString("\\r") + case '\t': + quotedStrBuilder.WriteString("\\t") + case '\v': + quotedStrBuilder.WriteString("\\v") + case '\\': + quotedStrBuilder.WriteString("\\\\") + case '"': + quotedStrBuilder.WriteString("\\\"") + default: + quotedStrBuilder.WriteRune(c) + } + } + escapedStr := quotedStrBuilder.String() + return "\"" + escapedStr + "\"", nil +} + +// sanitize replaces all invalid runes in the given string with utf8.RuneError. 
+func sanitize(s string) string { + var sanitizedStringBuilder strings.Builder + for _, r := range s { + if !utf8.ValidRune(r) { + sanitizedStringBuilder.WriteRune(utf8.RuneError) + } else { + sanitizedStringBuilder.WriteRune(r) + } + } + return sanitizedStringBuilder.String() +} + +var ( + stringListType = reflect.TypeOf([]string{}) +) diff --git a/vendor/github.com/google/cel-go/interpreter/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel new file mode 100644 index 00000000..220e23d4 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/BUILD.bazel @@ -0,0 +1,74 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "activation.go", + "attribute_patterns.go", + "attributes.go", + "decorators.go", + "dispatcher.go", + "evalstate.go", + "interpretable.go", + "interpreter.go", + "optimizations.go", + "planner.go", + "prune.go", + "runtimecost.go", + ], + importpath = "github.com/google/cel-go/interpreter", + deps = [ + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/containers:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/overloads:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//common/types/traits:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/durationpb:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + "@org_golang_google_protobuf//types/known/timestamppb:go_default_library", + "@org_golang_google_protobuf//types/known/wrapperspb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = [ + "activation_test.go", + "attribute_patterns_test.go", + "attributes_test.go", + "interpreter_test.go", + "prune_test.go", + "runtimecost_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//checker:go_default_library", + "//common/containers:go_default_library", + "//common/debug:go_default_library", + "//common/decls:go_default_library", + "//common/functions:go_default_library", + "//common/operators:go_default_library", + "//common/stdlib:go_default_library", + "//common/types:go_default_library", + "//parser:go_default_library", + "//test:go_default_library", + "//test/proto2pb:go_default_library", + "//test/proto3pb:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/anypb:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/interpreter/activation.go b/vendor/github.com/google/cel-go/interpreter/activation.go new file mode 100644 index 00000000..a8026445 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/activation.go @@ -0,0 +1,201 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "errors" + "fmt" + "sync" + + "github.com/google/cel-go/common/types/ref" +) + +// Activation used to resolve identifiers by name and references by id. +// +// An Activation is the primary mechanism by which a caller supplies input into a CEL program. +type Activation interface { + // ResolveName returns a value from the activation by qualified name, or false if the name + // could not be found. + ResolveName(name string) (any, bool) + + // Parent returns the parent of the current activation, may be nil. + // If non-nil, the parent will be searched during resolve calls. + Parent() Activation +} + +// EmptyActivation returns a variable-free activation. +func EmptyActivation() Activation { + return emptyActivation{} +} + +// emptyActivation is a variable-free activation. +type emptyActivation struct{} + +func (emptyActivation) ResolveName(string) (any, bool) { return nil, false } +func (emptyActivation) Parent() Activation { return nil } + +// NewActivation returns an activation based on a map-based binding where the map keys are +// expected to be qualified names used with ResolveName calls. +// +// The input `bindings` may either be of type `Activation` or `map[string]any`. +// +// Lazy bindings may be supplied within the map-based input in either of the following forms: +// - func() any +// - func() ref.Val +// +// The output of the lazy binding will overwrite the variable reference in the internal map. +// +// Values which are not represented as ref.Val types on input may be adapted to a ref.Val using +// the types.Adapter configured in the environment. +func NewActivation(bindings any) (Activation, error) { + if bindings == nil { + return nil, errors.New("bindings must be non-nil") + } + a, isActivation := bindings.(Activation) + if isActivation { + return a, nil + } + m, isMap := bindings.(map[string]any) + if !isMap { + return nil, fmt.Errorf( + "activation input must be an activation or map[string]interface: got %T", + bindings) + } + return &mapActivation{bindings: m}, nil +} + +// mapActivation which implements Activation and maps of named values. +// +// Named bindings may lazily supply values by providing a function which accepts no arguments and +// produces an interface value. +type mapActivation struct { + bindings map[string]any +} + +// Parent implements the Activation interface method. +func (a *mapActivation) Parent() Activation { + return nil +} + +// ResolveName implements the Activation interface method. +func (a *mapActivation) ResolveName(name string) (any, bool) { + obj, found := a.bindings[name] + if !found { + return nil, false + } + fn, isLazy := obj.(func() ref.Val) + if isLazy { + obj = fn() + a.bindings[name] = obj + } + fnRaw, isLazy := obj.(func() any) + if isLazy { + obj = fnRaw() + a.bindings[name] = obj + } + return obj, found +} + +// hierarchicalActivation which implements Activation and contains a parent and +// child activation. +type hierarchicalActivation struct { + parent Activation + child Activation +} + +// Parent implements the Activation interface method. 
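+//
+// Editorial sketch: hierarchical resolution prefers the child activation and
+// falls back to the parent:
+//
+//	parent, _ := NewActivation(map[string]any{"x": 1, "y": 2})
+//	child, _ := NewActivation(map[string]any{"x": 10})
+//	combined := NewHierarchicalActivation(parent, child)
+//	// combined resolves "x" to 10 (child) and "y" to 2 (parent).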
+func (a *hierarchicalActivation) Parent() Activation { + return a.parent +} + +// ResolveName implements the Activation interface method. +func (a *hierarchicalActivation) ResolveName(name string) (any, bool) { + if object, found := a.child.ResolveName(name); found { + return object, found + } + return a.parent.ResolveName(name) +} + +// NewHierarchicalActivation takes two activations and produces a new one which prioritizes +// resolution in the child first and parent(s) second. +func NewHierarchicalActivation(parent Activation, child Activation) Activation { + return &hierarchicalActivation{parent, child} +} + +// NewPartialActivation returns an Activation which contains a list of AttributePattern values +// representing field and index operations that should result in a 'types.Unknown' result. +// +// The `bindings` value may be any value type supported by the interpreter.NewActivation call, +// but is typically either an existing Activation or map[string]any. +func NewPartialActivation(bindings any, + unknowns ...*AttributePattern) (PartialActivation, error) { + a, err := NewActivation(bindings) + if err != nil { + return nil, err + } + return &partActivation{Activation: a, unknowns: unknowns}, nil +} + +// PartialActivation extends the Activation interface with a set of UnknownAttributePatterns. +type PartialActivation interface { + Activation + + // UnknownAttributePaths returns a set of AttributePattern values which match Attribute + // expressions for data accesses whose values are not yet known. + UnknownAttributePatterns() []*AttributePattern +} + +// partActivation is the default implementations of the PartialActivation interface. +type partActivation struct { + Activation + unknowns []*AttributePattern +} + +// UnknownAttributePatterns implements the PartialActivation interface method. +func (a *partActivation) UnknownAttributePatterns() []*AttributePattern { + return a.unknowns +} + +// varActivation represents a single mutable variable binding. +// +// This activation type should only be used within folds as the fold loop controls the object +// life-cycle. +type varActivation struct { + parent Activation + name string + val ref.Val +} + +// Parent implements the Activation interface method. +func (v *varActivation) Parent() Activation { + return v.parent +} + +// ResolveName implements the Activation interface method. +func (v *varActivation) ResolveName(name string) (any, bool) { + if name == v.name { + return v.val, true + } + return v.parent.ResolveName(name) +} + +var ( + // pool of var activations to reduce allocations during folds. + varActivationPool = &sync.Pool{ + New: func() any { + return &varActivation{} + }, + } +) diff --git a/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go new file mode 100644 index 00000000..8f19bde7 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go @@ -0,0 +1,397 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "fmt" + + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// AttributePattern represents a top-level variable with an optional set of qualifier patterns. +// +// When using a CEL expression within a container, e.g. a package or namespace, the variable name +// in the pattern must match the qualified name produced during the variable namespace resolution. +// For example, if variable `c` appears in an expression whose container is `a.b`, the variable +// name supplied to the pattern must be `a.b.c` +// +// The qualifier patterns for attribute matching must be one of the following: +// +// - valid map key type: string, int, uint, bool +// - wildcard (*) +// +// Examples: +// +// 1. ns.myvar["complex-value"] +// 2. ns.myvar["complex-value"][0] +// 3. ns.myvar["complex-value"].*.name +// +// The first example is simple: match an attribute where the variable is 'ns.myvar' with a +// field access on 'complex-value'. The second example expands the match to indicate that only +// a specific index `0` should match. And lastly, the third example matches any indexed access +// that later selects the 'name' field. +type AttributePattern struct { + variable string + qualifierPatterns []*AttributeQualifierPattern +} + +// NewAttributePattern produces a new mutable AttributePattern based on a variable name. +func NewAttributePattern(variable string) *AttributePattern { + return &AttributePattern{ + variable: variable, + qualifierPatterns: []*AttributeQualifierPattern{}, + } +} + +// QualString adds a string qualifier pattern to the AttributePattern. The string may be a valid +// identifier, or string map key including empty string. +func (apat *AttributePattern) QualString(pattern string) *AttributePattern { + apat.qualifierPatterns = append(apat.qualifierPatterns, + &AttributeQualifierPattern{value: pattern}) + return apat +} + +// QualInt adds an int qualifier pattern to the AttributePattern. The index may be either a map or +// list index. +func (apat *AttributePattern) QualInt(pattern int64) *AttributePattern { + apat.qualifierPatterns = append(apat.qualifierPatterns, + &AttributeQualifierPattern{value: pattern}) + return apat +} + +// QualUint adds an uint qualifier pattern for a map index operation to the AttributePattern. +func (apat *AttributePattern) QualUint(pattern uint64) *AttributePattern { + apat.qualifierPatterns = append(apat.qualifierPatterns, + &AttributeQualifierPattern{value: pattern}) + return apat +} + +// QualBool adds a bool qualifier pattern for a map index operation to the AttributePattern. +func (apat *AttributePattern) QualBool(pattern bool) *AttributePattern { + apat.qualifierPatterns = append(apat.qualifierPatterns, + &AttributeQualifierPattern{value: pattern}) + return apat +} + +// Wildcard adds a special sentinel qualifier pattern that will match any single qualifier. +func (apat *AttributePattern) Wildcard() *AttributePattern { + apat.qualifierPatterns = append(apat.qualifierPatterns, + &AttributeQualifierPattern{wildcard: true}) + return apat +} + +// VariableMatches returns true if the fully qualified variable matches the AttributePattern +// fully qualified variable name. 
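For illustration (an editorial sketch, not part of the patch): the fluent builders defined below compose into patterns like the numbered examples in the comment above, and the resulting pattern can be handed to NewPartialActivation from activation.go.

package main

import (
	"fmt"

	"github.com/google/cel-go/interpreter"
)

func main() {
	// Pattern for example 3 above: ns.myvar["complex-value"].*.name
	pat := interpreter.NewAttributePattern("ns.myvar").
		QualString("complex-value").
		Wildcard().
		QualString("name")

	// Accesses matching the pattern should evaluate to types.Unknown.
	vars, err := interpreter.NewPartialActivation(map[string]any{}, pat)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(vars.UnknownAttributePatterns())) // 1
}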
+func (apat *AttributePattern) VariableMatches(variable string) bool {
+	return apat.variable == variable
+}
+
+// QualifierPatterns returns the set of AttributeQualifierPattern values on the AttributePattern.
+func (apat *AttributePattern) QualifierPatterns() []*AttributeQualifierPattern {
+	return apat.qualifierPatterns
+}
+
+// AttributeQualifierPattern holds a wildcard or valued qualifier pattern.
+type AttributeQualifierPattern struct {
+	wildcard bool
+	value    any
+}
+
+// Matches returns true if the qualifier pattern is a wildcard, or the Qualifier implements the
+// qualifierValueEquator interface and its QualifierValueEquals returns true for the qualifier pattern.
+func (qpat *AttributeQualifierPattern) Matches(q Qualifier) bool {
+	if qpat.wildcard {
+		return true
+	}
+	qve, ok := q.(qualifierValueEquator)
+	return ok && qve.QualifierValueEquals(qpat.value)
+}
+
+// qualifierValueEquator defines an interface for determining if an input value, of valid map key
+// type, is equal to the value held in the Qualifier. This interface is used by the
+// AttributeQualifierPattern to determine pattern matches for non-wildcard qualifier patterns.
+//
+// Note: Attribute values are also Qualifier values; however, Attributes are resolved before
+// qualification happens. This is an implementation detail, but one relevant to why the Attribute
+// types do not surface in the list of implementations.
+//
+// See: partialAttributeFactory.matchesUnknownPatterns for more details on how this interface is
+// used.
+type qualifierValueEquator interface {
+	// QualifierValueEquals returns true if the input value is equal to the value held in the
+	// Qualifier.
+	QualifierValueEquals(value any) bool
+}
+
+// QualifierValueEquals implementation for boolean qualifiers.
+func (q *boolQualifier) QualifierValueEquals(value any) bool {
+	bval, ok := value.(bool)
+	return ok && q.value == bval
+}
+
+// QualifierValueEquals implementation for field qualifiers.
+func (q *fieldQualifier) QualifierValueEquals(value any) bool {
+	sval, ok := value.(string)
+	return ok && q.Name == sval
+}
+
+// QualifierValueEquals implementation for string qualifiers.
+func (q *stringQualifier) QualifierValueEquals(value any) bool {
+	sval, ok := value.(string)
+	return ok && q.value == sval
+}
+
+// QualifierValueEquals implementation for int qualifiers.
+func (q *intQualifier) QualifierValueEquals(value any) bool {
+	return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for uint qualifiers.
+func (q *uintQualifier) QualifierValueEquals(value any) bool {
+	return numericValueEquals(value, q.celValue)
+}
+
+// QualifierValueEquals implementation for double qualifiers.
+func (q *doubleQualifier) QualifierValueEquals(value any) bool {
+	return numericValueEquals(value, q.celValue)
+}
+
+// numericValueEquals uses CEL equality to determine whether two number values are equal.
+func numericValueEquals(value any, celValue ref.Val) bool {
+	val := types.DefaultTypeAdapter.NativeToValue(value)
+	return celValue.Equal(val) == types.True
+}
+
+// NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing
+// AttributePattern matches with PartialActivation inputs.
+func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+	fac := NewAttributeFactory(container, adapter, provider, opts...)
+ return &partialAttributeFactory{ + AttributeFactory: fac, + container: container, + adapter: adapter, + provider: provider, + } +} + +type partialAttributeFactory struct { + AttributeFactory + container *containers.Container + adapter types.Adapter + provider types.Provider +} + +// AbsoluteAttribute implementation of the AttributeFactory interface which wraps the +// NamespacedAttribute resolution in an internal attributeMatcher object to dynamically match +// unknown patterns from PartialActivation inputs if given. +func (fac *partialAttributeFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute { + attr := fac.AttributeFactory.AbsoluteAttribute(id, names...) + return &attributeMatcher{fac: fac, NamespacedAttribute: attr} +} + +// MaybeAttribute implementation of the AttributeFactory interface which ensure that the set of +// 'maybe' NamespacedAttribute values are produced using the partialAttributeFactory rather than +// the base AttributeFactory implementation. +func (fac *partialAttributeFactory) MaybeAttribute(id int64, name string) Attribute { + return &maybeAttribute{ + id: id, + attrs: []NamespacedAttribute{ + fac.AbsoluteAttribute(id, fac.container.ResolveCandidateNames(name)...), + }, + adapter: fac.adapter, + provider: fac.provider, + fac: fac, + } +} + +// matchesUnknownPatterns returns true if the variable names and qualifiers for a given +// Attribute value match any of the ActivationPattern objects in the set of unknown activation +// patterns on the given PartialActivation. +// +// For example, in the expression `a.b`, the Attribute is composed of variable `a`, with string +// qualifier `b`. When a PartialActivation is supplied, it indicates that some or all of the data +// provided in the input is unknown by specifying unknown AttributePatterns. An AttributePattern +// that refers to variable `a` with a string qualifier of `c` will not match `a.b`; however, any +// of the following patterns will match Attribute `a.b`: +// +// - `AttributePattern("a")` +// - `AttributePattern("a").Wildcard()` +// - `AttributePattern("a").QualString("b")` +// - `AttributePattern("a").QualString("b").QualInt(0)` +// +// Any AttributePattern which overlaps an Attribute or vice-versa will produce an Unknown result +// for the last pattern matched variable or qualifier in the Attribute. In the first matching +// example, the expression id representing variable `a` would be listed in the Unknown result, +// whereas in the other pattern examples, the qualifier `b` would be returned as the Unknown. +func (fac *partialAttributeFactory) matchesUnknownPatterns( + vars PartialActivation, + attrID int64, + variableNames []string, + qualifiers []Qualifier) (*types.Unknown, error) { + patterns := vars.UnknownAttributePatterns() + candidateIndices := map[int]struct{}{} + for _, variable := range variableNames { + for i, pat := range patterns { + if pat.VariableMatches(variable) { + if len(qualifiers) == 0 { + return types.NewUnknown(attrID, types.NewAttributeTrail(variable)), nil + } + candidateIndices[i] = struct{}{} + } + } + } + // Determine whether to return early if there are no candidate unknown patterns. + if len(candidateIndices) == 0 { + return nil, nil + } + // Resolve the attribute qualifiers into a static set. This prevents more dynamic + // Attribute resolutions than necessary when there are multiple unknown patterns + // that traverse the same Attribute-based qualifier field. 
+ newQuals := make([]Qualifier, len(qualifiers)) + for i, qual := range qualifiers { + attr, isAttr := qual.(Attribute) + if isAttr { + val, err := attr.Resolve(vars) + if err != nil { + return nil, err + } + // If this resolution behavior ever changes, new implementations of the + // qualifierValueEquator may be required to handle proper resolution. + qual, err = fac.NewQualifier(nil, qual.ID(), val, attr.IsOptional()) + if err != nil { + return nil, err + } + } + newQuals[i] = qual + } + // Determine whether any of the unknown patterns match. + for patIdx := range candidateIndices { + pat := patterns[patIdx] + isUnk := true + matchExprID := attrID + qualPats := pat.QualifierPatterns() + for i, qual := range newQuals { + if i >= len(qualPats) { + break + } + matchExprID = qual.ID() + qualPat := qualPats[i] + // Note, the AttributeQualifierPattern relies on the input Qualifier not being an + // Attribute, since there is no way to resolve the Attribute with the information + // provided to the Matches call. + if !qualPat.Matches(qual) { + isUnk = false + break + } + } + if isUnk { + attr := types.NewAttributeTrail(pat.variable) + for i := 0; i < len(qualPats) && i < len(newQuals); i++ { + if qual, ok := newQuals[i].(ConstantQualifier); ok { + switch v := qual.Value().Value().(type) { + case bool: + types.QualifyAttribute[bool](attr, v) + case float64: + types.QualifyAttribute[int64](attr, int64(v)) + case int64: + types.QualifyAttribute[int64](attr, v) + case string: + types.QualifyAttribute[string](attr, v) + case uint64: + types.QualifyAttribute[uint64](attr, v) + default: + types.QualifyAttribute[string](attr, fmt.Sprintf("%v", v)) + } + } else { + types.QualifyAttribute[string](attr, "*") + } + } + return types.NewUnknown(matchExprID, attr), nil + } + } + return nil, nil +} + +// attributeMatcher embeds the NamespacedAttribute interface which allows it to participate in +// AttributePattern matching against Attribute values without having to modify the code paths that +// identify Attributes in expressions. +type attributeMatcher struct { + NamespacedAttribute + qualifiers []Qualifier + fac *partialAttributeFactory +} + +// AddQualifier implements the Attribute interface method. +func (m *attributeMatcher) AddQualifier(qual Qualifier) (Attribute, error) { + // Add the qualifier to the embedded NamespacedAttribute. If the input to the Resolve + // method is not a PartialActivation, or does not match an unknown attribute pattern, the + // Resolve method is directly invoked on the underlying NamespacedAttribute. + _, err := m.NamespacedAttribute.AddQualifier(qual) + if err != nil { + return nil, err + } + // The attributeMatcher overloads TryResolve and will attempt to match unknown patterns against + // the variable name and qualifier set contained within the Attribute. These values are not + // directly inspectable on the top-level NamespacedAttribute interface and so are tracked within + // the attributeMatcher. + m.qualifiers = append(m.qualifiers, qual) + return m, nil +} + +// Resolve is an implementation of the NamespacedAttribute interface method which tests +// for matching unknown attribute patterns and returns types.Unknown if present. Otherwise, +// the standard Resolve logic applies. +func (m *attributeMatcher) Resolve(vars Activation) (any, error) { + id := m.NamespacedAttribute.ID() + // Bug in how partial activation is resolved, should search parents as well. 
+	partial, isPartial := toPartialActivation(vars)
+	if isPartial {
+		unk, err := m.fac.matchesUnknownPatterns(
+			partial,
+			id,
+			m.CandidateVariableNames(),
+			m.qualifiers)
+		if err != nil {
+			return nil, err
+		}
+		if unk != nil {
+			return unk, nil
+		}
+	}
+	return m.NamespacedAttribute.Resolve(vars)
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) Qualify(vars Activation, obj any) (any, error) {
+	return attrQualify(m.fac, vars, obj, m)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (m *attributeMatcher) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return attrQualifyIfPresent(m.fac, vars, obj, m, presenceOnly)
+}
+
+func toPartialActivation(vars Activation) (PartialActivation, bool) {
+	pv, ok := vars.(PartialActivation)
+	if ok {
+		return pv, true
+	}
+	if vars.Parent() != nil {
+		return toPartialActivation(vars.Parent())
+	}
+	return nil, false
+} diff --git a/vendor/github.com/google/cel-go/interpreter/attributes.go b/vendor/github.com/google/cel-go/interpreter/attributes.go new file mode 100644 index 00000000..b1b3aacc --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/attributes.go @@ -0,0 +1,1436 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/google/cel-go/common/containers"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// AttributeFactory provides methods for creating Attribute and Qualifier values.
+type AttributeFactory interface {
+	// AbsoluteAttribute creates an attribute that refers to a top-level variable name.
+	//
+	// Checked expressions generate an absolute attribute with a single name.
+	// Parse-only expressions may have more than one possible absolute identifier when the
+	// expression is created within a container, e.g. package or namespace.
+	//
+	// When there is more than one name supplied to the AbsoluteAttribute call, the names
+	// must be in CEL's namespace resolution order. The name arguments provided here are
+	// returned in the same order as they were provided by the NamespacedAttribute
+	// CandidateVariableNames method.
+	AbsoluteAttribute(id int64, names ...string) NamespacedAttribute
+
+	// ConditionalAttribute creates an attribute with two Attribute branches, where the Attribute
+	// that is resolved depends on the boolean evaluation of the input 'expr'.
+	ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute
+
+	// MaybeAttribute creates an attribute that refers to either a field selection or a namespaced
+	// variable name.
+	//
+	// Only expressions which have not been type-checked may generate oneof attributes.
+ MaybeAttribute(id int64, name string) Attribute + + // RelativeAttribute creates an attribute whose value is a qualification of a dynamic + // computation rather than a static variable reference. + RelativeAttribute(id int64, operand Interpretable) Attribute + + // NewQualifier creates a qualifier on the target object with a given value. + // + // The 'val' may be an Attribute or any proto-supported map key type: bool, int, string, uint. + // + // The qualifier may consider the object type being qualified, if present. If absent, the + // qualification should be considered dynamic and the qualification should still work, though + // it may be sub-optimal. + NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) +} + +// Qualifier marker interface for designating different qualifier values and where they appear +// within field selections and index call expressions (`_[_]`). +type Qualifier interface { + // ID where the qualifier appears within an expression. + ID() int64 + + // IsOptional specifies whether the qualifier is optional. + // Instead of a direct qualification, an optional qualifier will be resolved via QualifyIfPresent + // rather than Qualify. A non-optional qualifier may also be resolved through QualifyIfPresent if + // the object to qualify is itself optional. + IsOptional() bool + + // Qualify performs a qualification, e.g. field selection, on the input object and returns + // the value of the access and whether the value was set. A non-nil value with a false presence + // test result indicates that the value being returned is the default value. + Qualify(vars Activation, obj any) (any, error) + + // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object. + // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as + // to whether the qualifier is present. + QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) +} + +// ConstantQualifier interface embeds the Qualifier interface and provides an option to inspect the +// qualifier's constant value. +// +// Non-constant qualifiers are of Attribute type. +type ConstantQualifier interface { + Qualifier + + // Value returns the constant value associated with the qualifier. + Value() ref.Val +} + +// Attribute values are a variable or value with an optional set of qualifiers, such as field, key, +// or index accesses. +type Attribute interface { + Qualifier + + // AddQualifier adds a qualifier on the Attribute or error if the qualification is not a valid qualifier type. + AddQualifier(Qualifier) (Attribute, error) + + // Resolve returns the value of the Attribute and whether it was present given an Activation. + // For objects which support safe traversal, the value may be non-nil and the presence flag be false. + // + // If an error is encountered during attribute resolution, it will be returned immediately. + // If the attribute cannot be resolved within the Activation, the result must be: `nil`, `error` + // with the error indicating which variable was missing. + Resolve(Activation) (any, error) +} + +// NamespacedAttribute values are a variable within a namespace, and an optional set of qualifiers +// such as field, key, or index accesses. +type NamespacedAttribute interface { + Attribute + + // CandidateVariableNames returns the possible namespaced variable names for this Attribute in + // the CEL namespace resolution order. 
+	CandidateVariableNames() []string
+
+	// Qualifiers returns the list of qualifiers associated with the Attribute.
+	Qualifiers() []Qualifier
+}
+
+// AttrFactoryOption specifies a functional option for configuring an attribute factory.
+type AttrFactoryOption func(*attrFactory) *attrFactory
+
+// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field selection
+// is performed on a primitive type.
+func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption {
+	return func(fac *attrFactory) *attrFactory {
+		fac.errorOnBadPresenceTest = value
+		return fac
+	}
+}
+
+// NewAttributeFactory returns a default AttributeFactory which produces Attribute values
+// capable of resolving types by simple names and qualifying values using the supported qualifier
+// types: bool, int, string, and uint.
+func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+	fac := &attrFactory{
+		container: cont,
+		adapter:   a,
+		provider:  p,
+	}
+	for _, o := range opts {
+		fac = o(fac)
+	}
+	return fac
+}
+
+type attrFactory struct {
+	container *containers.Container
+	adapter   types.Adapter
+	provider  types.Provider
+
+	errorOnBadPresenceTest bool
+}
+
+// AbsoluteAttribute refers to a variable value and an optional qualifier path.
+//
+// The namespaceNames represent the names the variable could have based on namespace
+// resolution rules.
+func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
+	return &absoluteAttribute{
+		id:                     id,
+		namespaceNames:         names,
+		qualifiers:             []Qualifier{},
+		adapter:                r.adapter,
+		provider:               r.provider,
+		fac:                    r,
+		errorOnBadPresenceTest: r.errorOnBadPresenceTest,
+	}
+}
+
+// ConditionalAttribute supports the case where an attribute selection may occur on a conditional
+// expression, e.g. (cond ? a : b).c
+func (r *attrFactory) ConditionalAttribute(id int64, expr Interpretable, t, f Attribute) Attribute {
+	return &conditionalAttribute{
+		id:      id,
+		expr:    expr,
+		truthy:  t,
+		falsy:   f,
+		adapter: r.adapter,
+		fac:     r,
+	}
+}
+
+// MaybeAttribute collects variants of unchecked AbsoluteAttribute values which could either be
+// direct variable accesses or some combination of variable access with qualification.
+func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute {
+	return &maybeAttribute{
+		id: id,
+		attrs: []NamespacedAttribute{
+			r.AbsoluteAttribute(id, r.container.ResolveCandidateNames(name)...),
+		},
+		adapter:  r.adapter,
+		provider: r.provider,
+		fac:      r,
+	}
+}
+
+// RelativeAttribute refers to an expression and an optional qualifier path.
+func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute {
+	return &relativeAttribute{
+		id:                     id,
+		operand:                operand,
+		qualifiers:             []Qualifier{},
+		adapter:                r.adapter,
+		fac:                    r,
+		errorOnBadPresenceTest: r.errorOnBadPresenceTest,
+	}
+}
+
+// NewQualifier is an implementation of the AttributeFactory interface.
+func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, opt bool) (Qualifier, error) {
+	// Before creating a new qualifier, check to see if this is a protobuf message field access.
+	// If so, use the precomputed GetFrom qualification method rather than the standard
+	// stringQualifier.
+ str, isStr := val.(string) + if isStr && objType != nil && objType.Kind() == types.StructKind { + ft, found := r.provider.FindStructFieldType(objType.TypeName(), str) + if found && ft.IsSet != nil && ft.GetFrom != nil { + return &fieldQualifier{ + id: qualID, + Name: str, + FieldType: ft, + adapter: r.adapter, + optional: opt, + }, nil + } + } + return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest) +} + +type absoluteAttribute struct { + id int64 + // namespaceNames represent the names the variable could have based on declared container + // (package) of the expression. + namespaceNames []string + qualifiers []Qualifier + adapter types.Adapter + provider types.Provider + fac AttributeFactory + + errorOnBadPresenceTest bool +} + +// ID implements the Attribute interface method. +func (a *absoluteAttribute) ID() int64 { + qualCount := len(a.qualifiers) + if qualCount == 0 { + return a.id + } + return a.qualifiers[qualCount-1].ID() +} + +// IsOptional returns trivially false for an attribute as the attribute represents a fully +// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier +// is created and marks the attribute as optional. +func (a *absoluteAttribute) IsOptional() bool { + return false +} + +// AddQualifier implements the Attribute interface method. +func (a *absoluteAttribute) AddQualifier(qual Qualifier) (Attribute, error) { + a.qualifiers = append(a.qualifiers, qual) + return a, nil +} + +// CandidateVariableNames implements the NamespaceAttribute interface method. +func (a *absoluteAttribute) CandidateVariableNames() []string { + return a.namespaceNames +} + +// Qualifiers returns the list of Qualifier instances associated with the namespaced attribute. +func (a *absoluteAttribute) Qualifiers() []Qualifier { + return a.qualifiers +} + +// Qualify is an implementation of the Qualifier interface method. +func (a *absoluteAttribute) Qualify(vars Activation, obj any) (any, error) { + return attrQualify(a.fac, vars, obj, a) +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (a *absoluteAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly) +} + +// String implements the Stringer interface method. +func (a *absoluteAttribute) String() string { + return fmt.Sprintf("id: %v, names: %v", a.id, a.namespaceNames) +} + +// Resolve returns the resolved Attribute value given the Activation, or error if the Attribute +// variable is not found, or if its Qualifiers cannot be applied successfully. +// +// If the variable name cannot be found as an Activation variable or in the TypeProvider as +// a type, then the result is `nil`, `error` with the error indicating the name of the first +// variable searched as missing. +func (a *absoluteAttribute) Resolve(vars Activation) (any, error) { + for _, nm := range a.namespaceNames { + // If the variable is found, process it. Otherwise, wait until the checks to + // determine whether the type is unknown before returning. 
+ obj, found := vars.ResolveName(nm) + if found { + if celErr, ok := obj.(*types.Err); ok { + return nil, celErr.Unwrap() + } + obj, isOpt, err := applyQualifiers(vars, obj, a.qualifiers) + if err != nil { + return nil, err + } + if isOpt { + val := a.adapter.NativeToValue(obj) + if types.IsUnknown(val) { + return val, nil + } + return types.OptionalOf(val), nil + } + return obj, nil + } + // Attempt to resolve the qualified type name if the name is not a variable identifier. + typ, found := a.provider.FindIdent(nm) + if found { + if len(a.qualifiers) == 0 { + return typ, nil + } + } + } + var attrNames strings.Builder + for i, nm := range a.namespaceNames { + if i != 0 { + attrNames.WriteString(", ") + } + attrNames.WriteString(nm) + } + return nil, missingAttribute(attrNames.String()) +} + +type conditionalAttribute struct { + id int64 + expr Interpretable + truthy Attribute + falsy Attribute + adapter types.Adapter + fac AttributeFactory +} + +// ID is an implementation of the Attribute interface method. +func (a *conditionalAttribute) ID() int64 { + // There's a field access after the conditional. + if a.truthy.ID() == a.falsy.ID() { + return a.truthy.ID() + } + // Otherwise return the conditional id as the consistent id being tracked. + return a.id +} + +// IsOptional returns trivially false for an attribute as the attribute represents a fully +// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier +// is created and marks the attribute as optional. +func (a *conditionalAttribute) IsOptional() bool { + return false +} + +// AddQualifier appends the same qualifier to both sides of the conditional, in effect managing +// the qualification of alternate attributes. +func (a *conditionalAttribute) AddQualifier(qual Qualifier) (Attribute, error) { + _, err := a.truthy.AddQualifier(qual) + if err != nil { + return nil, err + } + _, err = a.falsy.AddQualifier(qual) + if err != nil { + return nil, err + } + return a, nil +} + +// Qualify is an implementation of the Qualifier interface method. +func (a *conditionalAttribute) Qualify(vars Activation, obj any) (any, error) { + return attrQualify(a.fac, vars, obj, a) +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (a *conditionalAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly) +} + +// Resolve evaluates the condition, and then resolves the truthy or falsy branch accordingly. +func (a *conditionalAttribute) Resolve(vars Activation) (any, error) { + val := a.expr.Eval(vars) + if val == types.True { + return a.truthy.Resolve(vars) + } + if val == types.False { + return a.falsy.Resolve(vars) + } + if types.IsUnknown(val) { + return val, nil + } + return nil, types.MaybeNoSuchOverloadErr(val).(*types.Err) +} + +// String is an implementation of the Stringer interface method. +func (a *conditionalAttribute) String() string { + return fmt.Sprintf("id: %v, truthy attribute: %v, falsy attribute: %v", a.id, a.truthy, a.falsy) +} + +type maybeAttribute struct { + id int64 + attrs []NamespacedAttribute + adapter types.Adapter + provider types.Provider + fac AttributeFactory +} + +// ID is an implementation of the Attribute interface method. +func (a *maybeAttribute) ID() int64 { + return a.attrs[0].ID() +} + +// IsOptional returns trivially false for an attribute as the attribute represents a fully +// qualified variable name. 
If the attribute is used in an optional manner, then an attrQualifier
+// is created and marks the attribute as optional.
+func (a *maybeAttribute) IsOptional() bool {
+	return false
+}
+
+// AddQualifier adds a qualifier to each possible attribute variant, and also creates
+// a new namespaced variable from the qualified value.
+//
+// The algorithm for building the maybe attribute is as follows:
+//
+// 1. Create a maybe attribute from a simple identifier when it occurs in a parsed-only expression
+//
+//	mb = MaybeAttribute(id, "a")
+//
+// Initializing the maybe attribute creates an absolute attribute internally which includes the
+// possible namespaced names of the attribute. In this example, let's assume we are in namespace
+// 'ns'; the maybe is then one of the following variable names:
+//
+//	possible variable names -- ns.a, a
+//
+// 2. Adding a qualifier to the maybe means that the variable name could be a longer qualified
+// name, or a field selection on one of the possible variable names produced earlier:
+//
+//	mb.AddQualifier("b")
+//
+//	possible variable names -- ns.a.b, a.b
+//	possible field selection -- ns.a['b'], a['b']
+//
+// If none of the attributes within the maybe resolves a value, the result is an error.
+func (a *maybeAttribute) AddQualifier(qual Qualifier) (Attribute, error) {
+	str := ""
+	isStr := false
+	cq, isConst := qual.(ConstantQualifier)
+	if isConst {
+		str, isStr = cq.Value().Value().(string)
+	}
+	var augmentedNames []string
+	// First add the qualifier to all existing attributes in the oneof.
+	for _, attr := range a.attrs {
+		if isStr && len(attr.Qualifiers()) == 0 {
+			candidateVars := attr.CandidateVariableNames()
+			augmentedNames = make([]string, len(candidateVars))
+			for i, name := range candidateVars {
+				augmentedNames[i] = fmt.Sprintf("%s.%s", name, str)
+			}
+		}
+		_, err := attr.AddQualifier(qual)
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Next, ensure the most specific variable / type reference is searched first.
+	if len(augmentedNames) != 0 {
+		a.attrs = append([]NamespacedAttribute{a.fac.AbsoluteAttribute(qual.ID(), augmentedNames...)}, a.attrs...)
+	}
+	return a, nil
+}
+
+// Qualify is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) Qualify(vars Activation, obj any) (any, error) {
+	return attrQualify(a.fac, vars, obj, a)
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (a *maybeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly)
+}
+
+// Resolve follows the variable resolution rules to determine whether the attribute is a variable
+// or a field selection.
+func (a *maybeAttribute) Resolve(vars Activation) (any, error) {
+	var maybeErr error
+	for _, attr := range a.attrs {
+		obj, err := attr.Resolve(vars)
+		// Return an error if one is encountered.
+		if err != nil {
+			resErr, ok := err.(*resolutionError)
+			if !ok {
+				return nil, err
+			}
+			// If this was not a missing variable error, return it.
+			if !resErr.isMissingAttribute() {
+				return nil, err
+			}
+			// When the variable is missing in a maybe attribute we defer erroring.
+			if maybeErr == nil {
+				maybeErr = resErr
+			}
+			// Continue attempting to resolve possible variables.
+			continue
+		}
+		return obj, nil
+	}
+	// Else, produce a no such attribute error.
+	return nil, maybeErr
+}
+
+// String is an implementation of the Stringer interface method.
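For illustration (an editorial sketch, not part of the patch): the candidate-name ordering that maybeAttribute searches comes from the vendored common/containers package; a container named 'ns' yields the most specific name first, matching the 'ns.a, a' example above.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
)

func main() {
	cont, err := containers.NewContainer(containers.Name("ns"))
	if err != nil {
		panic(err)
	}
	// Most specific candidate first, matching the search order that
	// maybeAttribute.Resolve walks above.
	fmt.Println(cont.ResolveCandidateNames("a")) // [ns.a a]
}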
+func (a *maybeAttribute) String() string { + return fmt.Sprintf("id: %v, attributes: %v", a.id, a.attrs) +} + +type relativeAttribute struct { + id int64 + operand Interpretable + qualifiers []Qualifier + adapter types.Adapter + fac AttributeFactory + + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Attribute interface method. +func (a *relativeAttribute) ID() int64 { + qualCount := len(a.qualifiers) + if qualCount == 0 { + return a.id + } + return a.qualifiers[qualCount-1].ID() +} + +// IsOptional returns trivially false for an attribute as the attribute represents a fully +// qualified variable name. If the attribute is used in an optional manner, then an attrQualifier +// is created and marks the attribute as optional. +func (a *relativeAttribute) IsOptional() bool { + return false +} + +// AddQualifier implements the Attribute interface method. +func (a *relativeAttribute) AddQualifier(qual Qualifier) (Attribute, error) { + a.qualifiers = append(a.qualifiers, qual) + return a, nil +} + +// Qualify is an implementation of the Qualifier interface method. +func (a *relativeAttribute) Qualify(vars Activation, obj any) (any, error) { + return attrQualify(a.fac, vars, obj, a) +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (a *relativeAttribute) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return attrQualifyIfPresent(a.fac, vars, obj, a, presenceOnly) +} + +// Resolve expression value and qualifier relative to the expression result. +func (a *relativeAttribute) Resolve(vars Activation) (any, error) { + // First, evaluate the operand. + v := a.operand.Eval(vars) + if types.IsError(v) { + return nil, v.(*types.Err) + } + if types.IsUnknown(v) { + return v, nil + } + obj, isOpt, err := applyQualifiers(vars, v, a.qualifiers) + if err != nil { + return nil, err + } + if isOpt { + val := a.adapter.NativeToValue(obj) + if types.IsUnknown(val) { + return val, nil + } + return types.OptionalOf(val), nil + } + return obj, nil +} + +// String is an implementation of the Stringer interface method. +func (a *relativeAttribute) String() string { + return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand) +} + +func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) { + var qual Qualifier + switch val := v.(type) { + case Attribute: + // Note, attributes are initially identified as non-optional since they represent a top-level + // field access; however, when used as a relative qualifier, e.g. a[?b.c], then an attrQualifier + // is created which intercepts the IsOptional check for the attribute in order to return the + // correct result. 
+ return &attrQualifier{ + id: id, + Attribute: val, + optional: opt, + }, nil + case string: + qual = &stringQualifier{ + id: id, + value: val, + celValue: types.String(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case int: + qual = &intQualifier{ + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case int32: + qual = &intQualifier{ + id: id, + value: int64(val), + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case int64: + qual = &intQualifier{ + id: id, + value: val, + celValue: types.Int(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case uint: + qual = &uintQualifier{ + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case uint32: + qual = &uintQualifier{ + id: id, + value: uint64(val), + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case uint64: + qual = &uintQualifier{ + id: id, + value: val, + celValue: types.Uint(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case bool: + qual = &boolQualifier{ + id: id, + value: val, + celValue: types.Bool(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case float32: + qual = &doubleQualifier{ + id: id, + value: float64(val), + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case float64: + qual = &doubleQualifier{ + id: id, + value: val, + celValue: types.Double(val), + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.String: + qual = &stringQualifier{ + id: id, + value: string(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Int: + qual = &intQualifier{ + id: id, + value: int64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Uint: + qual = &uintQualifier{ + id: id, + value: uint64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Bool: + qual = &boolQualifier{ + id: id, + value: bool(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case types.Double: + qual = &doubleQualifier{ + id: id, + value: float64(val), + celValue: val, + adapter: adapter, + optional: opt, + errorOnBadPresenceTest: errorOnBadPresenceTest, + } + case *types.Unknown: + qual = &unknownQualifier{id: id, value: val} + default: + if q, ok := v.(Qualifier); ok { + return q, nil + } + return nil, fmt.Errorf("invalid qualifier type: %T", v) + } + return qual, nil +} + +type attrQualifier struct { + id int64 + Attribute + optional bool +} + +// ID implements the Qualifier interface method and returns the qualification instruction id +// rather than the attribute id. +func (q *attrQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. 
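For illustration (an editorial sketch, not part of the patch): the newQualifier switch above accepts both native Go values and CEL wrapper types, so the following two calls produce equivalent int qualifiers. The use of types.NewEmptyRegistry and containers.DefaultContainer for a self-contained setup is an assumption, not something this patch prescribes.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	fac := interpreter.NewAttributeFactory(
		containers.DefaultContainer, types.DefaultTypeAdapter, types.NewEmptyRegistry())

	// A native Go int and a CEL types.Int both land in an intQualifier.
	q1, _ := fac.NewQualifier(nil, 1, 3, false)
	q2, _ := fac.NewQualifier(nil, 2, types.Int(3), false)
	fmt.Println(q1.ID(), q2.ID()) // 1 2
}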
+func (q *attrQualifier) IsOptional() bool { + return q.optional +} + +type stringQualifier struct { + id int64 + value string + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *stringQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *stringQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *stringQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (q *stringQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + s := q.value + switch o := obj.(type) { + case map[string]any: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]string: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]int: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]int32: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]int64: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]uint: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]uint32: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]uint64: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]float32: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]float64: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + case map[string]bool: + obj, isKey := o[s] + if isKey { + return obj, true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingKey(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *stringQualifier) Value() ref.Val { + return q.celValue +} + +type intQualifier struct { + id int64 + value int64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *intQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *intQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *intQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. 
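For illustration (an editorial sketch, not part of the patch): a string qualifier selecting a key from one of the specialized map types handled above, and the resolutionError produced when the key is absent. The factory setup is an assumption for a self-contained example.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	fac := interpreter.NewAttributeFactory(
		containers.DefaultContainer, types.DefaultTypeAdapter, types.NewEmptyRegistry())
	vars := interpreter.EmptyActivation()

	q, _ := fac.NewQualifier(nil, 1, "region", false)
	val, _ := q.Qualify(vars, map[string]string{"region": "us-west"})
	fmt.Println(val) // us-west

	// A missing key is reported via resolutionError.
	_, err := q.Qualify(vars, map[string]string{})
	fmt.Println(err) // no such key: region
}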
+func (q *intQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + i := q.value + var isMap bool + switch o := obj.(type) { + // The specialized map types supported by an int qualifier are considerably fewer than the set + // of specialized map types supported by string qualifiers since they are less frequently used + // than string-based map keys. Additional specializations may be added in the future if + // desired. + case map[int]any: + isMap = true + obj, isKey := o[int(i)] + if isKey { + return obj, true, nil + } + case map[int32]any: + isMap = true + obj, isKey := o[int32(i)] + if isKey { + return obj, true, nil + } + case map[int64]any: + isMap = true + obj, isKey := o[i] + if isKey { + return obj, true, nil + } + case []any: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []string: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []int: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []int32: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []int64: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []uint: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []uint32: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []uint64: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []float32: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []float64: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + case []bool: + isIndex := i >= 0 && i < int64(len(o)) + if isIndex { + return o[i], true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + if isMap { + return nil, false, missingKey(q.celValue) + } + return nil, false, missingIndex(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *intQualifier) Value() ref.Val { + return q.celValue +} + +type uintQualifier struct { + id int64 + value uint64 + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *uintQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *uintQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *uintQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. 
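For illustration (an editorial sketch, not part of the patch): an int qualifier doubles as a list index per the switch above, and an out-of-range index surfaces the 'index out of bounds' resolutionError defined later in this file. Setup assumptions as in the previous sketch.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	fac := interpreter.NewAttributeFactory(
		containers.DefaultContainer, types.DefaultTypeAdapter, types.NewEmptyRegistry())
	vars := interpreter.EmptyActivation()

	q, _ := fac.NewQualifier(nil, 1, 1, false)
	val, _ := q.Qualify(vars, []string{"a", "b"})
	fmt.Println(val) // b

	// Indexing past the end surfaces the out-of-bounds resolutionError.
	_, err := q.Qualify(vars, []string{"a"})
	fmt.Println(err) // index out of bounds: 1
}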
+func (q *uintQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + u := q.value + switch o := obj.(type) { + // The specialized map types supported by a uint qualifier are considerably fewer than the set + // of specialized map types supported by string qualifiers since they are less frequently used + // than string-based map keys. Additional specializations may be added in the future if + // desired. + case map[uint]any: + obj, isKey := o[uint(u)] + if isKey { + return obj, true, nil + } + case map[uint32]any: + obj, isKey := o[uint32(u)] + if isKey { + return obj, true, nil + } + case map[uint64]any: + obj, isKey := o[u] + if isKey { + return obj, true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingKey(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *uintQualifier) Value() ref.Val { + return q.celValue +} + +type boolQualifier struct { + id int64 + value bool + celValue ref.Val + adapter types.Adapter + optional bool + errorOnBadPresenceTest bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *boolQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *boolQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. +func (q *boolQualifier) Qualify(vars Activation, obj any) (any, error) { + val, _, err := q.qualifyInternal(vars, obj, false, false) + return val, err +} + +// QualifyIfPresent is an implementation of the Qualifier interface method. +func (q *boolQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + return q.qualifyInternal(vars, obj, true, presenceOnly) +} + +func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) { + b := q.value + switch o := obj.(type) { + case map[bool]any: + obj, isKey := o[b] + if isKey { + return obj, true, nil + } + default: + return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest) + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingKey(q.celValue) +} + +// Value implements the ConstantQualifier interface +func (q *boolQualifier) Value() ref.Val { + return q.celValue +} + +// fieldQualifier indicates that the qualification is a well-defined field with a known +// field type. When the field type is known this can be used to improve the speed and +// efficiency of field resolution. +type fieldQualifier struct { + id int64 + Name string + FieldType *types.FieldType + adapter types.Adapter + optional bool +} + +// ID is an implementation of the Qualifier interface method. +func (q *fieldQualifier) ID() int64 { + return q.id +} + +// IsOptional implements the Qualifier interface method. +func (q *fieldQualifier) IsOptional() bool { + return q.optional +} + +// Qualify implements the Qualifier interface method. 
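For illustration (an editorial sketch, not part of the patch): QualifyIfPresent is the presence-testing path used for optional qualifiers and has()-style tests; on the bool-keyed map handled above, a missing key reports absence rather than an error. Setup assumptions as before.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	fac := interpreter.NewAttributeFactory(
		containers.DefaultContainer, types.DefaultTypeAdapter, types.NewEmptyRegistry())
	vars := interpreter.EmptyActivation()

	q, _ := fac.NewQualifier(nil, 1, true, false)
	val, present, _ := q.QualifyIfPresent(vars, map[bool]any{true: "enabled"}, false)
	fmt.Println(val, present) // enabled true

	// Absent keys report presence=false without an error on this path.
	_, present, _ = q.QualifyIfPresent(vars, map[bool]any{}, true)
	fmt.Println(present) // false
}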
+func (q *fieldQualifier) Qualify(vars Activation, obj any) (any, error) {
+	if rv, ok := obj.(ref.Val); ok {
+		obj = rv.Value()
+	}
+	val, err := q.FieldType.GetFrom(obj)
+	if err != nil {
+		return nil, err
+	}
+	return val, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
+func (q *fieldQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	if rv, ok := obj.(ref.Val); ok {
+		obj = rv.Value()
+	}
+	if !q.FieldType.IsSet(obj) {
+		return nil, false, nil
+	}
+	if presenceOnly {
+		return nil, true, nil
+	}
+	val, err := q.FieldType.GetFrom(obj)
+	if err != nil {
+		return nil, false, err
+	}
+	return val, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *fieldQualifier) Value() ref.Val {
+	return types.String(q.Name)
+}
+
+// doubleQualifier qualifies a CEL object, map, or list using a double value.
+//
+// This qualifier is used for working with dynamic data like JSON or protobuf.Any where the value
+// type may not be known ahead of time and may not conform to the standard types supported as valid
+// protobuf map key types.
+type doubleQualifier struct {
+	id                     int64
+	value                  float64
+	celValue               ref.Val
+	adapter                types.Adapter
+	optional               bool
+	errorOnBadPresenceTest bool
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *doubleQualifier) ID() int64 {
+	return q.id
+}
+
+// IsOptional implements the Qualifier interface method.
+func (q *doubleQualifier) IsOptional() bool {
+	return q.optional
+}
+
+// Qualify implements the Qualifier interface method.
+func (q *doubleQualifier) Qualify(vars Activation, obj any) (any, error) {
+	val, _, err := q.qualifyInternal(vars, obj, false, false)
+	return val, err
+}
+
+func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return q.qualifyInternal(vars, obj, true, presenceOnly)
+}
+
+func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
+	return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
+}
+
+// Value implements the ConstantQualifier interface
+func (q *doubleQualifier) Value() ref.Val {
+	return q.celValue
+}
+
+// unknownQualifier is a simple qualifier which always returns a preconfigured set of unknown values
+// for any value subject to qualification. This is consistent with CEL's unknown handling elsewhere.
+type unknownQualifier struct {
+	id    int64
+	value *types.Unknown
+}
+
+// ID is an implementation of the Qualifier interface method.
+func (q *unknownQualifier) ID() int64 {
+	return q.id
+}
+
+// IsOptional returns trivially false as the unknown value is always returned.
+func (q *unknownQualifier) IsOptional() bool {
+	return false
+}
+
+// Qualify returns the unknown value associated with this qualifier.
+func (q *unknownQualifier) Qualify(vars Activation, obj any) (any, error) {
+	return q.value, nil
+}
+
+// QualifyIfPresent is an implementation of the Qualifier interface method.
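For illustration (an editorial sketch, not part of the patch): the unknown machinery ties together here; resolving an attribute that overlaps a declared AttributePattern yields *types.Unknown instead of an error. The registry and container arguments are assumptions for a self-contained setup.

package main

import (
	"fmt"

	"github.com/google/cel-go/common/containers"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/interpreter"
)

func main() {
	// Declare a.b as unknown, then resolve the attribute a.b.
	vars, _ := interpreter.NewPartialActivation(
		map[string]any{},
		interpreter.NewAttributePattern("a").QualString("b"))

	fac := interpreter.NewPartialAttributeFactory(
		containers.DefaultContainer, types.DefaultTypeAdapter, types.NewEmptyRegistry())
	attr := fac.AbsoluteAttribute(1, "a")
	q, _ := fac.NewQualifier(nil, 2, "b", false)
	if _, err := attr.AddQualifier(q); err != nil {
		panic(err)
	}

	val, err := attr.Resolve(vars)
	fmt.Printf("%v %T\n", err, val) // <nil> *types.Unknown
}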
+func (q *unknownQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) {
+	return q.value, true, nil
+}
+
+// Value implements the ConstantQualifier interface
+func (q *unknownQualifier) Value() ref.Val {
+	return q.value
+}
+
+func applyQualifiers(vars Activation, obj any, qualifiers []Qualifier) (any, bool, error) {
+	optObj, isOpt := obj.(*types.Optional)
+	if isOpt {
+		if !optObj.HasValue() {
+			return optObj, false, nil
+		}
+		obj = optObj.GetValue().Value()
+	}
+
+	var err error
+	for _, qual := range qualifiers {
+		var qualObj any
+		isOpt = isOpt || qual.IsOptional()
+		if isOpt {
+			var present bool
+			qualObj, present, err = qual.QualifyIfPresent(vars, obj, false)
+			if err != nil {
+				return nil, false, err
+			}
+			if !present {
+				// We return optional none here with a presence of 'false' as the layers
+				// above will attempt to call types.OptionalOf() on a present value if any
+				// of the qualifiers is optional.
+				return types.OptionalNone, false, nil
+			}
+		} else {
+			qualObj, err = qual.Qualify(vars, obj)
+			if err != nil {
+				return nil, false, err
+			}
+		}
+		obj = qualObj
+	}
+	return obj, isOpt, nil
+}
+
+// attrQualify performs a qualification using the result of an attribute evaluation.
+func attrQualify(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute) (any, error) {
+	val, err := qualAttr.Resolve(vars)
+	if err != nil {
+		return nil, err
+	}
+	qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+	if err != nil {
+		return nil, err
+	}
+	return qual.Qualify(vars, obj)
+}
+
+// attrQualifyIfPresent conditionally performs the qualification if the result of the attribute
+// evaluation is present on the target object.
+func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAttr Attribute,
+	presenceOnly bool) (any, bool, error) {
+	val, err := qualAttr.Resolve(vars)
+	if err != nil {
+		return nil, false, err
+	}
+	qual, err := fac.NewQualifier(nil, qualAttr.ID(), val, qualAttr.IsOptional())
+	if err != nil {
+		return nil, false, err
+	}
+	return qual.QualifyIfPresent(vars, obj, presenceOnly)
+}
+
+// refQualify attempts to convert the value to a CEL value and then uses reflection methods to try to
+// apply the qualifier with the option to presence test field accesses before retrieving field values.
+func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) {
+	celVal := adapter.NativeToValue(obj)
+	switch v := celVal.(type) {
+	case *types.Unknown:
+		return v, true, nil
+	case *types.Err:
+		return nil, false, v
+	case traits.Mapper:
+		val, found := v.Find(idx)
+		// If the index is of the wrong type for the map, then it is possible
+		// for the Find call to produce an error.
+		if types.IsError(val) {
+			return nil, false, val.(*types.Err)
+		}
+		if found {
+			return val, true, nil
+		}
+		if presenceTest {
+			return nil, false, nil
+		}
+		return nil, false, missingKey(idx)
+	case traits.Lister:
+		// If the index argument is not a valid numeric type, then it is possible
+		// for the index operation to produce an error.
+ i, err := types.IndexOrError(idx) + if err != nil { + return nil, false, err + } + celIndex := types.Int(i) + if i >= 0 && celIndex < v.Size().(types.Int) { + return v.Get(idx), true, nil + } + if presenceTest { + return nil, false, nil + } + return nil, false, missingIndex(idx) + case traits.Indexer: + if presenceTest { + ft, ok := v.(traits.FieldTester) + if ok { + presence := ft.IsSet(idx) + if types.IsError(presence) { + return nil, false, presence.(*types.Err) + } + // If not found or presence only test, then return. + // Otherwise, if found, obtain the value later on. + if presenceOnly || presence == types.False { + return nil, presence == types.True, nil + } + } + } + val := v.Get(idx) + if types.IsError(val) { + return nil, false, val.(*types.Err) + } + return val, true, nil + default: + if presenceTest && !errorOnBadPresenceTest { + return nil, false, nil + } + return nil, false, missingKey(idx) + } +} + +// resolutionError is a custom error type which encodes the different error states which may +// occur during attribute resolution. +type resolutionError struct { + missingAttribute string + missingIndex ref.Val + missingKey ref.Val +} + +func (e *resolutionError) isMissingAttribute() bool { + return e.missingAttribute != "" +} + +func missingIndex(missing ref.Val) *resolutionError { + return &resolutionError{ + missingIndex: missing, + } +} + +func missingKey(missing ref.Val) *resolutionError { + return &resolutionError{ + missingKey: missing, + } +} + +func missingAttribute(attr string) *resolutionError { + return &resolutionError{ + missingAttribute: attr, + } +} + +// Error implements the error interface method. +func (e *resolutionError) Error() string { + if e.missingKey != nil { + return fmt.Sprintf("no such key: %v", e.missingKey) + } + if e.missingIndex != nil { + return fmt.Sprintf("index out of bounds: %v", e.missingIndex) + } + if e.missingAttribute != "" { + return fmt.Sprintf("no such attribute(s): %s", e.missingAttribute) + } + return "invalid attribute" +} + +// Is implements the errors.Is() method used by more recent versions of Go. +func (e *resolutionError) Is(err error) bool { + return err.Error() == e.Error() +} diff --git a/vendor/github.com/google/cel-go/interpreter/decorators.go b/vendor/github.com/google/cel-go/interpreter/decorators.go new file mode 100644 index 00000000..502db35f --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/decorators.go @@ -0,0 +1,272 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// InterpretableDecorator is a functional interface for decorating or replacing +// Interpretable expression nodes at construction time. 
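// Unknown propagation is the counterpart to the resolution errors above:
// attributes marked unknown flow a types.Unknown through evaluation instead of
// failing. A sketch via the module's public cel package, assuming its partial
// evaluation options; the variable names are illustrative only:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"github.com/google/cel-go/common/types"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("x", cel.IntType),
		cel.Variable("y", cel.IntType),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`x + y`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptPartialEval))
	if err != nil {
		panic(err)
	}
	// Supply x, and declare y unknown via an attribute pattern.
	vars, err := cel.PartialVars(map[string]any{"x": int64(1)}, cel.AttributePattern("y"))
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(vars)
	fmt.Println(types.IsUnknown(out), err) // true <nil>
}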
+type InterpretableDecorator func(Interpretable) (Interpretable, error)
+
+// decObserveEval records evaluation state into an EvalState object.
+func decObserveEval(observer EvalObserver) InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		switch inst := i.(type) {
+		case *evalWatch, *evalWatchAttr, *evalWatchConst, *evalWatchConstructor:
+			// these instructions are already watching, return straight-away.
+			return i, nil
+		case InterpretableAttribute:
+			return &evalWatchAttr{
+				InterpretableAttribute: inst,
+				observer:               observer,
+			}, nil
+		case InterpretableConst:
+			return &evalWatchConst{
+				InterpretableConst: inst,
+				observer:           observer,
+			}, nil
+		case InterpretableConstructor:
+			return &evalWatchConstructor{
+				constructor: inst,
+				observer:    observer,
+			}, nil
+		default:
+			return &evalWatch{
+				Interpretable: i,
+				observer:      observer,
+			}, nil
+		}
+	}
+}
+
+// decInterruptFolds creates an interpretable decorator which marks comprehensions as interruptable
+// where the interrupt state is communicated via a hidden variable on the Activation.
+func decInterruptFolds() InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		fold, ok := i.(*evalFold)
+		if !ok {
+			return i, nil
+		}
+		fold.interruptable = true
+		return fold, nil
+	}
+}
+
+// decDisableShortcircuits ensures that all branches of an expression will be evaluated, no short-circuiting.
+func decDisableShortcircuits() InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		switch expr := i.(type) {
+		case *evalOr:
+			return &evalExhaustiveOr{
+				id:    expr.id,
+				terms: expr.terms,
+			}, nil
+		case *evalAnd:
+			return &evalExhaustiveAnd{
+				id:    expr.id,
+				terms: expr.terms,
+			}, nil
+		case *evalFold:
+			expr.exhaustive = true
+			return expr, nil
+		case InterpretableAttribute:
+			cond, isCond := expr.Attr().(*conditionalAttribute)
+			if isCond {
+				return &evalExhaustiveConditional{
+					id:      cond.id,
+					attr:    cond,
+					adapter: expr.Adapter(),
+				}, nil
+			}
+		}
+		return i, nil
+	}
+}
+
+// decOptimize optimizes the program plan by looking for common evaluation patterns and
+// conditionally precomputing the result.
+// - build list and map values with constant elements.
+// - convert 'in' operations to set membership tests if possible.
+func decOptimize() InterpretableDecorator {
+	return func(i Interpretable) (Interpretable, error) {
+		switch inst := i.(type) {
+		case *evalList:
+			return maybeBuildListLiteral(i, inst)
+		case *evalMap:
+			return maybeBuildMapLiteral(i, inst)
+		case InterpretableCall:
+			if inst.OverloadID() == overloads.InList {
+				return maybeOptimizeSetMembership(i, inst)
+			}
+			if overloads.IsTypeConversionFunction(inst.Function()) {
+				return maybeOptimizeConstUnary(i, inst)
+			}
+		}
+		return i, nil
+	}
+}
+
+// decRegexOptimizer compiles regex pattern string constants.
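// Program-level options in this module's cel package compose the decorators
// above. A minimal sketch, assuming the top-level cel API, showing how
// exhaustive evaluation visits every branch and records it in the EvalState:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`x > 0 || x < -10`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// OptExhaustiveEval layers decDisableShortcircuits and state observation
	// over the plan, so both '||' branches are evaluated and observable.
	prg, err := env.Program(ast, cel.EvalOptions(cel.OptExhaustiveEval))
	if err != nil {
		panic(err)
	}
	out, details, err := prg.Eval(map[string]any{"x": int64(5)})
	if err != nil {
		panic(err)
	}
	fmt.Println(out, len(details.State().IDs()) > 0) // true true
}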
+func decRegexOptimizer(regexOptimizations ...*RegexOptimization) InterpretableDecorator { + functionMatchMap := make(map[string]*RegexOptimization) + overloadMatchMap := make(map[string]*RegexOptimization) + for _, m := range regexOptimizations { + functionMatchMap[m.Function] = m + if m.OverloadID != "" { + overloadMatchMap[m.OverloadID] = m + } + } + + return func(i Interpretable) (Interpretable, error) { + call, ok := i.(InterpretableCall) + if !ok { + return i, nil + } + + var matcher *RegexOptimization + var found bool + if call.OverloadID() != "" { + matcher, found = overloadMatchMap[call.OverloadID()] + } + if !found { + matcher, found = functionMatchMap[call.Function()] + } + if !found || matcher.RegexIndex >= len(call.Args()) { + return i, nil + } + args := call.Args() + regexArg := args[matcher.RegexIndex] + regexStr, isConst := regexArg.(InterpretableConst) + if !isConst { + return i, nil + } + pattern, ok := regexStr.Value().(types.String) + if !ok { + return i, nil + } + return matcher.Factory(call, string(pattern)) + } +} + +func maybeOptimizeConstUnary(i Interpretable, call InterpretableCall) (Interpretable, error) { + args := call.Args() + if len(args) != 1 { + return i, nil + } + _, isConst := args[0].(InterpretableConst) + if !isConst { + return i, nil + } + val := call.Eval(EmptyActivation()) + if types.IsError(val) { + return nil, val.(*types.Err) + } + return NewConstValue(call.ID(), val), nil +} + +func maybeBuildListLiteral(i Interpretable, l *evalList) (Interpretable, error) { + for _, elem := range l.elems { + _, isConst := elem.(InterpretableConst) + if !isConst { + return i, nil + } + } + return NewConstValue(l.ID(), l.Eval(EmptyActivation())), nil +} + +func maybeBuildMapLiteral(i Interpretable, mp *evalMap) (Interpretable, error) { + for idx, key := range mp.keys { + _, isConst := key.(InterpretableConst) + if !isConst { + return i, nil + } + _, isConst = mp.vals[idx].(InterpretableConst) + if !isConst { + return i, nil + } + } + return NewConstValue(mp.ID(), mp.Eval(EmptyActivation())), nil +} + +// maybeOptimizeSetMembership may convert an 'in' operation against a list to map key membership +// test if the following conditions are true: +// - the list is a constant with homogeneous element types. +// - the elements are all of primitive type. +func maybeOptimizeSetMembership(i Interpretable, inlist InterpretableCall) (Interpretable, error) { + args := inlist.Args() + lhs := args[0] + rhs := args[1] + l, isConst := rhs.(InterpretableConst) + if !isConst { + return i, nil + } + // When the incoming binary call is flagged with as the InList overload, the value will + // always be convertible to a `traits.Lister` type. + list := l.Value().(traits.Lister) + if list.Size() == types.IntZero { + return NewConstValue(inlist.ID(), types.False), nil + } + it := list.Iterator() + valueSet := make(map[ref.Val]ref.Val) + for it.HasNext() == types.True { + elem := it.Next() + if !types.IsPrimitiveType(elem) || elem.Type() == types.BytesType { + // Note, non-primitive type are not yet supported, and []byte isn't hashable. 
+ return i, nil + } + valueSet[elem] = types.True + switch ev := elem.(type) { + case types.Double: + iv := ev.ConvertToType(types.IntType) + // Ensure that only lossless conversions are added to the set + if !types.IsError(iv) && iv.Equal(ev) == types.True { + valueSet[iv] = types.True + } + // Ensure that only lossless conversions are added to the set + uv := ev.ConvertToType(types.UintType) + if !types.IsError(uv) && uv.Equal(ev) == types.True { + valueSet[uv] = types.True + } + case types.Int: + dv := ev.ConvertToType(types.DoubleType) + if !types.IsError(dv) { + valueSet[dv] = types.True + } + uv := ev.ConvertToType(types.UintType) + if !types.IsError(uv) { + valueSet[uv] = types.True + } + case types.Uint: + dv := ev.ConvertToType(types.DoubleType) + if !types.IsError(dv) { + valueSet[dv] = types.True + } + iv := ev.ConvertToType(types.IntType) + if !types.IsError(iv) { + valueSet[iv] = types.True + } + } + } + return &evalSetMembership{ + inst: inlist, + arg: lhs, + valueSet: valueSet, + }, nil +} diff --git a/vendor/github.com/google/cel-go/interpreter/dispatcher.go b/vendor/github.com/google/cel-go/interpreter/dispatcher.go new file mode 100644 index 00000000..8f0bdb7b --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/dispatcher.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "fmt" + + "github.com/google/cel-go/common/functions" +) + +// Dispatcher resolves function calls to their appropriate overload. +type Dispatcher interface { + // Add one or more overloads, returning an error if any Overload has the same Overload#Name. + Add(overloads ...*functions.Overload) error + + // FindOverload returns an Overload definition matching the provided name. + FindOverload(overload string) (*functions.Overload, bool) + + // OverloadIds returns the set of all overload identifiers configured for dispatch. + OverloadIds() []string +} + +// NewDispatcher returns an empty Dispatcher instance. +func NewDispatcher() Dispatcher { + return &defaultDispatcher{ + overloads: make(map[string]*functions.Overload)} +} + +// ExtendDispatcher returns a Dispatcher which inherits the overloads of its parent, and +// provides an isolation layer between built-ins and extension functions which is useful +// for forward compatibility. +func ExtendDispatcher(parent Dispatcher) Dispatcher { + return &defaultDispatcher{ + parent: parent, + overloads: make(map[string]*functions.Overload)} +} + +// overloadMap helper type for indexing overloads by function name. +type overloadMap map[string]*functions.Overload + +// defaultDispatcher struct which contains an overload map. +type defaultDispatcher struct { + parent Dispatcher + overloads overloadMap +} + +// Add implements the Dispatcher.Add interface method. +func (d *defaultDispatcher) Add(overloads ...*functions.Overload) error { + for _, o := range overloads { + // add the overload unless an overload of the same name has already been provided. 
+		if _, found := d.overloads[o.Operator]; found {
+			return fmt.Errorf("overload already exists '%s'", o.Operator)
+		}
+		// index the overload by function name.
+		d.overloads[o.Operator] = o
+	}
+	return nil
+}
+
+// FindOverload implements the Dispatcher.FindOverload interface method.
+func (d *defaultDispatcher) FindOverload(overload string) (*functions.Overload, bool) {
+	o, found := d.overloads[overload]
+	// Attempt to dispatch to an overload defined in the parent.
+	if !found && d.parent != nil {
+		return d.parent.FindOverload(overload)
+	}
+	return o, found
+}
+
+// OverloadIds implements the Dispatcher interface method.
+func (d *defaultDispatcher) OverloadIds() []string {
+	i := 0
+	overloads := make([]string, len(d.overloads))
+	for name := range d.overloads {
+		overloads[i] = name
+		i++
+	}
+	if d.parent == nil {
+		return overloads
+	}
+	parentOverloads := d.parent.OverloadIds()
+	for _, pName := range parentOverloads {
+		if _, found := d.overloads[pName]; !found {
+			overloads = append(overloads, pName)
+		}
+	}
+	return overloads
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/evalstate.go b/vendor/github.com/google/cel-go/interpreter/evalstate.go
new file mode 100644
index 00000000..4bdd1fdc
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/evalstate.go
@@ -0,0 +1,79 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// EvalState tracks the values associated with expression ids during execution.
+type EvalState interface {
+	// IDs returns the list of ids with recorded values.
+	IDs() []int64
+
+	// Value returns the observed value of the given expression id if found, and a nil false
+	// result if not.
+	Value(int64) (ref.Val, bool)
+
+	// SetValue sets the observed value of the expression id.
+	SetValue(int64, ref.Val)
+
+	// Reset clears the previously recorded expression values.
+	Reset()
+}
+
+// evalState permits the mutation of evaluation state for a given expression id.
+type evalState struct {
+	values map[int64]ref.Val
+}
+
+// NewEvalState returns an EvalState instance used to observe the intermediate
+// evaluations of an expression.
+func NewEvalState() EvalState {
+	return &evalState{
+		values: make(map[int64]ref.Val),
+	}
+}
+
+// IDs implements the EvalState interface method.
+func (s *evalState) IDs() []int64 {
+	var ids []int64
+	for k, v := range s.values {
+		if v != nil {
+			ids = append(ids, k)
+		}
+	}
+	return ids
+}
+
+// Value is an implementation of the EvalState interface method.
+func (s *evalState) Value(exprID int64) (ref.Val, bool) {
+	val, found := s.values[exprID]
+	return val, found
+}
+
+// SetValue is an implementation of the EvalState interface method.
+func (s *evalState) SetValue(exprID int64, val ref.Val) {
+	if val == nil {
+		delete(s.values, exprID)
+	} else {
+		s.values[exprID] = val
+	}
+}
+
+// Reset implements the EvalState interface method.
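// A minimal sketch of the Dispatcher contract above; the "shout_string"
// overload id and its implementation are illustrative only, not part of the
// standard library:

package main

import (
	"fmt"

	"github.com/google/cel-go/common/functions"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/common/types/ref"
	"github.com/google/cel-go/interpreter"
)

func main() {
	disp := interpreter.NewDispatcher()
	err := disp.Add(&functions.Overload{
		Operator: "shout_string",
		Unary: func(arg ref.Val) ref.Val {
			// Append '!' to the stringified argument.
			return types.String(fmt.Sprintf("%v!", arg.Value()))
		},
	})
	if err != nil {
		panic(err)
	}
	o, found := disp.FindOverload("shout_string")
	fmt.Println(found, o.Operator) // true shout_string

	// ExtendDispatcher isolates extension overloads from the parent's
	// built-ins while still resolving inherited ids.
	child := interpreter.ExtendDispatcher(disp)
	fmt.Println(len(child.OverloadIds())) // 1
}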
+func (s *evalState) Reset() { + s.values = map[int64]ref.Val{} +} diff --git a/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel b/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel new file mode 100644 index 00000000..4a80c3ea --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/functions/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//visibility:public"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "functions.go", + ], + importpath = "github.com/google/cel-go/interpreter/functions", + deps = [ + "//common/functions:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/interpreter/functions/functions.go b/vendor/github.com/google/cel-go/interpreter/functions/functions.go new file mode 100644 index 00000000..21ffb692 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/functions/functions.go @@ -0,0 +1,39 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package functions defines the standard builtin functions supported by the +// interpreter and as declared within the checker#StandardDeclarations. +package functions + +import fn "github.com/google/cel-go/common/functions" + +// Overload defines a named overload of a function, indicating an operand trait +// which must be present on the first argument to the overload as well as one +// of either a unary, binary, or function implementation. +// +// The majority of operators within the expression language are unary or binary +// and the specializations simplify the call contract for implementers of +// types with operator overloads. Any added complexity is assumed to be handled +// by the generic FunctionOp. +type Overload = fn.Overload + +// UnaryOp is a function that takes a single value and produces an output. +type UnaryOp = fn.UnaryOp + +// BinaryOp is a function that takes two values and produces an output. +type BinaryOp = fn.BinaryOp + +// FunctionOp is a function with accepts zero or more arguments and produces +// a value or error as a result. +type FunctionOp = fn.FunctionOp diff --git a/vendor/github.com/google/cel-go/interpreter/interpretable.go b/vendor/github.com/google/cel-go/interpreter/interpretable.go new file mode 100644 index 00000000..56123840 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/interpretable.go @@ -0,0 +1,1264 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "fmt" + + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/overloads" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + "github.com/google/cel-go/common/types/traits" +) + +// Interpretable can accept a given Activation and produce a value along with +// an accompanying EvalState which can be used to inspect whether additional +// data might be necessary to complete the evaluation. +type Interpretable interface { + // ID value corresponding to the expression node. + ID() int64 + + // Eval an Activation to produce an output. + Eval(activation Activation) ref.Val +} + +// InterpretableConst interface for tracking whether the Interpretable is a constant value. +type InterpretableConst interface { + Interpretable + + // Value returns the constant value of the instruction. + Value() ref.Val +} + +// InterpretableAttribute interface for tracking whether the Interpretable is an attribute. +type InterpretableAttribute interface { + Interpretable + + // Attr returns the Attribute value. + Attr() Attribute + + // Adapter returns the type adapter to be used for adapting resolved Attribute values. + Adapter() types.Adapter + + // AddQualifier proxies the Attribute.AddQualifier method. + // + // Note, this method may mutate the current attribute state. If the desire is to clone the + // Attribute, the Attribute should first be copied before adding the qualifier. Attributes + // are not copyable by default, so this is a capable that would need to be added to the + // AttributeFactory or specifically to the underlying Attribute implementation. + AddQualifier(Qualifier) (Attribute, error) + + // Qualify replicates the Attribute.Qualify method to permit extension and interception + // of object qualification. + Qualify(vars Activation, obj any) (any, error) + + // QualifyIfPresent qualifies the object if the qualifier is declared or defined on the object. + // The 'presenceOnly' flag indicates that the value is not necessary, just a boolean status as + // to whether the qualifier is present. + QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) + + // IsOptional indicates whether the resulting value is an optional type. + IsOptional() bool + + // Resolve returns the value of the Attribute given the current Activation. + Resolve(Activation) (any, error) +} + +// InterpretableCall interface for inspecting Interpretable instructions related to function calls. +type InterpretableCall interface { + Interpretable + + // Function returns the function name as it appears in text or mangled operator name as it + // appears in the operators.go file. + Function() string + + // OverloadID returns the overload id associated with the function specialization. + // Overload ids are stable across language boundaries and can be treated as synonymous with a + // unique function signature. + OverloadID() string + + // Args returns the normalized arguments to the function overload. + // For receiver-style functions, the receiver target is arg 0. + Args() []Interpretable +} + +// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map +// or struct. +type InterpretableConstructor interface { + Interpretable + + // InitVals returns all the list elements, map key and values or struct field values. 
+ InitVals() []Interpretable + + // Type returns the type constructed. + Type() ref.Type +} + +// Core Interpretable implementations used during the program planning phase. + +type evalTestOnly struct { + id int64 + InterpretableAttribute +} + +// ID implements the Interpretable interface method. +func (test *evalTestOnly) ID() int64 { + return test.id +} + +// Eval implements the Interpretable interface method. +func (test *evalTestOnly) Eval(ctx Activation) ref.Val { + val, err := test.Resolve(ctx) + // Return an error if the resolve step fails + if err != nil { + return types.LabelErrNode(test.id, types.WrapErr(err)) + } + if optVal, isOpt := val.(*types.Optional); isOpt { + return types.Bool(optVal.HasValue()) + } + return test.Adapter().NativeToValue(val) +} + +// AddQualifier appends a qualifier that will always and only perform a presence test. +func (test *evalTestOnly) AddQualifier(q Qualifier) (Attribute, error) { + cq, ok := q.(ConstantQualifier) + if !ok { + return nil, fmt.Errorf("test only expressions must have constant qualifiers: %v", q) + } + return test.InterpretableAttribute.AddQualifier(&testOnlyQualifier{ConstantQualifier: cq}) +} + +type testOnlyQualifier struct { + ConstantQualifier +} + +// Qualify determines whether the test-only qualifier is present on the input object. +func (q *testOnlyQualifier) Qualify(vars Activation, obj any) (any, error) { + out, present, err := q.ConstantQualifier.QualifyIfPresent(vars, obj, true) + if err != nil { + return nil, err + } + if unk, isUnk := out.(types.Unknown); isUnk { + return unk, nil + } + if opt, isOpt := out.(types.Optional); isOpt { + return opt.HasValue(), nil + } + return present, nil +} + +// QualifyIfPresent returns whether the target field in the test-only expression is present. +func (q *testOnlyQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + // Only ever test for presence. + return q.ConstantQualifier.QualifyIfPresent(vars, obj, true) +} + +// QualifierValueEquals determines whether the test-only constant qualifier equals the input value. +func (q *testOnlyQualifier) QualifierValueEquals(value any) bool { + // The input qualifier will always be of type string + return q.ConstantQualifier.Value().Value() == value +} + +// NewConstValue creates a new constant valued Interpretable. +func NewConstValue(id int64, val ref.Val) InterpretableConst { + return &evalConst{ + id: id, + val: val, + } +} + +type evalConst struct { + id int64 + val ref.Val +} + +// ID implements the Interpretable interface method. +func (cons *evalConst) ID() int64 { + return cons.id +} + +// Eval implements the Interpretable interface method. +func (cons *evalConst) Eval(ctx Activation) ref.Val { + return cons.val +} + +// Value implements the InterpretableConst interface method. +func (cons *evalConst) Value() ref.Val { + return cons.val +} + +type evalOr struct { + id int64 + terms []Interpretable +} + +// ID implements the Interpretable interface method. +func (or *evalOr) ID() int64 { + return or.id +} + +// Eval implements the Interpretable interface method. +func (or *evalOr) Eval(ctx Activation) ref.Val { + var err ref.Val = nil + var unk *types.Unknown + for _, term := range or.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // short-circuit on true. 
+ if ok && boolVal == types.True { + return types.True + } + if !ok { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + err = types.LabelErrNode(or.id, err) + } + } + } + if unk != nil { + return unk + } + if err != nil { + return err + } + return types.False +} + +type evalAnd struct { + id int64 + terms []Interpretable +} + +// ID implements the Interpretable interface method. +func (and *evalAnd) ID() int64 { + return and.id +} + +// Eval implements the Interpretable interface method. +func (and *evalAnd) Eval(ctx Activation) ref.Val { + var err ref.Val = nil + var unk *types.Unknown + for _, term := range and.terms { + val := term.Eval(ctx) + boolVal, ok := val.(types.Bool) + // short-circuit on false. + if ok && boolVal == types.False { + return types.False + } + if !ok { + isUnk := false + unk, isUnk = types.MaybeMergeUnknowns(val, unk) + if !isUnk && err == nil { + if types.IsError(val) { + err = val + } else { + err = types.MaybeNoSuchOverloadErr(val) + } + err = types.LabelErrNode(and.id, err) + } + } + } + if unk != nil { + return unk + } + if err != nil { + return err + } + return types.True +} + +type evalEq struct { + id int64 + lhs Interpretable + rhs Interpretable +} + +// ID implements the Interpretable interface method. +func (eq *evalEq) ID() int64 { + return eq.id +} + +// Eval implements the Interpretable interface method. +func (eq *evalEq) Eval(ctx Activation) ref.Val { + lVal := eq.lhs.Eval(ctx) + rVal := eq.rhs.Eval(ctx) + if types.IsUnknownOrError(lVal) { + return lVal + } + if types.IsUnknownOrError(rVal) { + return rVal + } + return types.Equal(lVal, rVal) +} + +// Function implements the InterpretableCall interface method. +func (*evalEq) Function() string { + return operators.Equals +} + +// OverloadID implements the InterpretableCall interface method. +func (*evalEq) OverloadID() string { + return overloads.Equals +} + +// Args implements the InterpretableCall interface method. +func (eq *evalEq) Args() []Interpretable { + return []Interpretable{eq.lhs, eq.rhs} +} + +type evalNe struct { + id int64 + lhs Interpretable + rhs Interpretable +} + +// ID implements the Interpretable interface method. +func (ne *evalNe) ID() int64 { + return ne.id +} + +// Eval implements the Interpretable interface method. +func (ne *evalNe) Eval(ctx Activation) ref.Val { + lVal := ne.lhs.Eval(ctx) + rVal := ne.rhs.Eval(ctx) + if types.IsUnknownOrError(lVal) { + return lVal + } + if types.IsUnknownOrError(rVal) { + return rVal + } + return types.Bool(types.Equal(lVal, rVal) != types.True) +} + +// Function implements the InterpretableCall interface method. +func (*evalNe) Function() string { + return operators.NotEquals +} + +// OverloadID implements the InterpretableCall interface method. +func (*evalNe) OverloadID() string { + return overloads.NotEquals +} + +// Args implements the InterpretableCall interface method. +func (ne *evalNe) Args() []Interpretable { + return []Interpretable{ne.lhs, ne.rhs} +} + +type evalZeroArity struct { + id int64 + function string + overload string + impl functions.FunctionOp +} + +// ID implements the Interpretable interface method. +func (zero *evalZeroArity) ID() int64 { + return zero.id +} + +// Eval implements the Interpretable interface method. 
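// The commutative error and unknown handling in evalOr and evalAnd above is
// observable from the public cel API: an error in one term is absorbed when
// another term decides the result. A minimal sketch:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.Variable("x", cel.IntType))
	if err != nil {
		panic(err)
	}
	// With x == 0 the first term raises a division-by-zero error, but the
	// literal 'true' in the second term makes the disjunction true anyway.
	ast, iss := env.Compile(`(1 / x > 0) || true`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]any{"x": int64(0)})
	fmt.Println(out, err) // true <nil>
}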
+func (zero *evalZeroArity) Eval(ctx Activation) ref.Val { + return types.LabelErrNode(zero.id, zero.impl()) +} + +// Function implements the InterpretableCall interface method. +func (zero *evalZeroArity) Function() string { + return zero.function +} + +// OverloadID implements the InterpretableCall interface method. +func (zero *evalZeroArity) OverloadID() string { + return zero.overload +} + +// Args returns the argument to the unary function. +func (zero *evalZeroArity) Args() []Interpretable { + return []Interpretable{} +} + +type evalUnary struct { + id int64 + function string + overload string + arg Interpretable + trait int + impl functions.UnaryOp + nonStrict bool +} + +// ID implements the Interpretable interface method. +func (un *evalUnary) ID() int64 { + return un.id +} + +// Eval implements the Interpretable interface method. +func (un *evalUnary) Eval(ctx Activation) ref.Val { + argVal := un.arg.Eval(ctx) + // Early return if the argument to the function is unknown or error. + strict := !un.nonStrict + if strict && types.IsUnknownOrError(argVal) { + return argVal + } + // If the implementation is bound and the argument value has the right traits required to + // invoke it, then call the implementation. + if un.impl != nil && (un.trait == 0 || (!strict && types.IsUnknownOrError(argVal)) || argVal.Type().HasTrait(un.trait)) { + return types.LabelErrNode(un.id, un.impl(argVal)) + } + // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the + // operand (arg0). + if argVal.Type().HasTrait(traits.ReceiverType) { + return types.LabelErrNode(un.id, argVal.(traits.Receiver).Receive(un.function, un.overload, []ref.Val{})) + } + return types.NewErrWithNodeID(un.id, "no such overload: %s", un.function) +} + +// Function implements the InterpretableCall interface method. +func (un *evalUnary) Function() string { + return un.function +} + +// OverloadID implements the InterpretableCall interface method. +func (un *evalUnary) OverloadID() string { + return un.overload +} + +// Args returns the argument to the unary function. +func (un *evalUnary) Args() []Interpretable { + return []Interpretable{un.arg} +} + +type evalBinary struct { + id int64 + function string + overload string + lhs Interpretable + rhs Interpretable + trait int + impl functions.BinaryOp + nonStrict bool +} + +// ID implements the Interpretable interface method. +func (bin *evalBinary) ID() int64 { + return bin.id +} + +// Eval implements the Interpretable interface method. +func (bin *evalBinary) Eval(ctx Activation) ref.Val { + lVal := bin.lhs.Eval(ctx) + rVal := bin.rhs.Eval(ctx) + // Early return if any argument to the function is unknown or error. + strict := !bin.nonStrict + if strict { + if types.IsUnknownOrError(lVal) { + return lVal + } + if types.IsUnknownOrError(rVal) { + return rVal + } + } + // If the implementation is bound and the argument value has the right traits required to + // invoke it, then call the implementation. + if bin.impl != nil && (bin.trait == 0 || (!strict && types.IsUnknownOrError(lVal)) || lVal.Type().HasTrait(bin.trait)) { + return types.LabelErrNode(bin.id, bin.impl(lVal, rVal)) + } + // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the + // operand (arg0). 
+ if lVal.Type().HasTrait(traits.ReceiverType) { + return types.LabelErrNode(bin.id, lVal.(traits.Receiver).Receive(bin.function, bin.overload, []ref.Val{rVal})) + } + return types.NewErrWithNodeID(bin.id, "no such overload: %s", bin.function) +} + +// Function implements the InterpretableCall interface method. +func (bin *evalBinary) Function() string { + return bin.function +} + +// OverloadID implements the InterpretableCall interface method. +func (bin *evalBinary) OverloadID() string { + return bin.overload +} + +// Args returns the argument to the unary function. +func (bin *evalBinary) Args() []Interpretable { + return []Interpretable{bin.lhs, bin.rhs} +} + +type evalVarArgs struct { + id int64 + function string + overload string + args []Interpretable + trait int + impl functions.FunctionOp + nonStrict bool +} + +// NewCall creates a new call Interpretable. +func NewCall(id int64, function, overload string, args []Interpretable, impl functions.FunctionOp) InterpretableCall { + return &evalVarArgs{ + id: id, + function: function, + overload: overload, + args: args, + impl: impl, + } +} + +// ID implements the Interpretable interface method. +func (fn *evalVarArgs) ID() int64 { + return fn.id +} + +// Eval implements the Interpretable interface method. +func (fn *evalVarArgs) Eval(ctx Activation) ref.Val { + argVals := make([]ref.Val, len(fn.args)) + // Early return if any argument to the function is unknown or error. + strict := !fn.nonStrict + for i, arg := range fn.args { + argVals[i] = arg.Eval(ctx) + if strict && types.IsUnknownOrError(argVals[i]) { + return argVals[i] + } + } + // If the implementation is bound and the argument value has the right traits required to + // invoke it, then call the implementation. + arg0 := argVals[0] + if fn.impl != nil && (fn.trait == 0 || (!strict && types.IsUnknownOrError(arg0)) || arg0.Type().HasTrait(fn.trait)) { + return types.LabelErrNode(fn.id, fn.impl(argVals...)) + } + // Otherwise, if the argument is a ReceiverType attempt to invoke the receiver method on the + // operand (arg0). + if arg0.Type().HasTrait(traits.ReceiverType) { + return types.LabelErrNode(fn.id, arg0.(traits.Receiver).Receive(fn.function, fn.overload, argVals[1:])) + } + return types.NewErrWithNodeID(fn.id, "no such overload: %s %d", fn.function, fn.id) +} + +// Function implements the InterpretableCall interface method. +func (fn *evalVarArgs) Function() string { + return fn.function +} + +// OverloadID implements the InterpretableCall interface method. +func (fn *evalVarArgs) OverloadID() string { + return fn.overload +} + +// Args returns the argument to the unary function. +func (fn *evalVarArgs) Args() []Interpretable { + return fn.args +} + +type evalList struct { + id int64 + elems []Interpretable + optionals []bool + hasOptionals bool + adapter types.Adapter +} + +// ID implements the Interpretable interface method. +func (l *evalList) ID() int64 { + return l.id +} + +// Eval implements the Interpretable interface method. +func (l *evalList) Eval(ctx Activation) ref.Val { + elemVals := make([]ref.Val, 0, len(l.elems)) + // If any argument is unknown or error early terminate. 
+ for i, elem := range l.elems { + elemVal := elem.Eval(ctx) + if types.IsUnknownOrError(elemVal) { + return elemVal + } + if l.hasOptionals && l.optionals[i] { + optVal, ok := elemVal.(*types.Optional) + if !ok { + return types.LabelErrNode(l.id, invalidOptionalElementInit(elemVal)) + } + if !optVal.HasValue() { + continue + } + elemVal = optVal.GetValue() + } + elemVals = append(elemVals, elemVal) + } + return l.adapter.NativeToValue(elemVals) +} + +func (l *evalList) InitVals() []Interpretable { + return l.elems +} + +func (l *evalList) Type() ref.Type { + return types.ListType +} + +type evalMap struct { + id int64 + keys []Interpretable + vals []Interpretable + optionals []bool + hasOptionals bool + adapter types.Adapter +} + +// ID implements the Interpretable interface method. +func (m *evalMap) ID() int64 { + return m.id +} + +// Eval implements the Interpretable interface method. +func (m *evalMap) Eval(ctx Activation) ref.Val { + entries := make(map[ref.Val]ref.Val) + // If any argument is unknown or error early terminate. + for i, key := range m.keys { + keyVal := key.Eval(ctx) + if types.IsUnknownOrError(keyVal) { + return keyVal + } + valVal := m.vals[i].Eval(ctx) + if types.IsUnknownOrError(valVal) { + return valVal + } + if m.hasOptionals && m.optionals[i] { + optVal, ok := valVal.(*types.Optional) + if !ok { + return types.LabelErrNode(m.id, invalidOptionalEntryInit(keyVal, valVal)) + } + if !optVal.HasValue() { + delete(entries, keyVal) + continue + } + valVal = optVal.GetValue() + } + entries[keyVal] = valVal + } + return m.adapter.NativeToValue(entries) +} + +func (m *evalMap) InitVals() []Interpretable { + if len(m.keys) != len(m.vals) { + return nil + } + result := make([]Interpretable, len(m.keys)+len(m.vals)) + idx := 0 + for i, k := range m.keys { + v := m.vals[i] + result[idx] = k + idx++ + result[idx] = v + idx++ + } + return result +} + +func (m *evalMap) Type() ref.Type { + return types.MapType +} + +type evalObj struct { + id int64 + typeName string + fields []string + vals []Interpretable + optionals []bool + hasOptionals bool + provider types.Provider +} + +// ID implements the Interpretable interface method. +func (o *evalObj) ID() int64 { + return o.id +} + +// Eval implements the Interpretable interface method. +func (o *evalObj) Eval(ctx Activation) ref.Val { + fieldVals := make(map[string]ref.Val) + // If any argument is unknown or error early terminate. + for i, field := range o.fields { + val := o.vals[i].Eval(ctx) + if types.IsUnknownOrError(val) { + return val + } + if o.hasOptionals && o.optionals[i] { + optVal, ok := val.(*types.Optional) + if !ok { + return types.LabelErrNode(o.id, invalidOptionalEntryInit(field, val)) + } + if !optVal.HasValue() { + delete(fieldVals, field) + continue + } + val = optVal.GetValue() + } + fieldVals[field] = val + } + return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals)) +} + +func (o *evalObj) InitVals() []Interpretable { + return o.vals +} + +func (o *evalObj) Type() ref.Type { + return types.NewObjectTypeValue(o.typeName) +} + +type evalFold struct { + id int64 + accuVar string + iterVar string + iterRange Interpretable + accu Interpretable + cond Interpretable + step Interpretable + result Interpretable + adapter types.Adapter + exhaustive bool + interruptable bool +} + +// ID implements the Interpretable interface method. +func (fold *evalFold) ID() int64 { + return fold.id +} + +// Eval implements the Interpretable interface method. 
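// evalList and evalMap above drop optional entries that resolve to
// optional.none() rather than inserting them. A sketch of that behavior
// through the public cel package, assuming the optional-types library
// extension is enabled:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(cel.OptionalTypes())
	if err != nil {
		panic(err)
	}
	// The '?' marker makes the element optional: an absent value is skipped,
	// which is the optionals branch in evalList.Eval.
	ast, iss := env.Compile(`[1, ?optional.ofNonZeroValue(0), 2].size()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err) // 2 <nil>
}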
+func (fold *evalFold) Eval(ctx Activation) ref.Val { + foldRange := fold.iterRange.Eval(ctx) + if !foldRange.Type().HasTrait(traits.IterableType) { + return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange) + } + // Configure the fold activation with the accumulator initial value. + accuCtx := varActivationPool.Get().(*varActivation) + accuCtx.parent = ctx + accuCtx.name = fold.accuVar + accuCtx.val = fold.accu.Eval(ctx) + // If the accumulator starts as an empty list, then the comprehension will build a list + // so create a mutable list to optimize the cost of the inner loop. + l, ok := accuCtx.val.(traits.Lister) + buildingList := false + if !fold.exhaustive && ok && l.Size() == types.IntZero { + buildingList = true + accuCtx.val = types.NewMutableList(fold.adapter) + } + iterCtx := varActivationPool.Get().(*varActivation) + iterCtx.parent = accuCtx + iterCtx.name = fold.iterVar + + interrupted := false + it := foldRange.(traits.Iterable).Iterator() + for it.HasNext() == types.True { + // Modify the iter var in the fold activation. + iterCtx.val = it.Next() + + // Evaluate the condition, terminate the loop if false. + cond := fold.cond.Eval(iterCtx) + condBool, ok := cond.(types.Bool) + if !fold.exhaustive && ok && condBool != types.True { + break + } + // Evaluate the evaluation step into accu var. + accuCtx.val = fold.step.Eval(iterCtx) + if fold.interruptable { + if stop, found := ctx.ResolveName("#interrupted"); found && stop == true { + interrupted = true + break + } + } + } + varActivationPool.Put(iterCtx) + if interrupted { + varActivationPool.Put(accuCtx) + return types.NewErr("operation interrupted") + } + + // Compute the result. + res := fold.result.Eval(accuCtx) + varActivationPool.Put(accuCtx) + // Convert a mutable list to an immutable one, if the comprehension has generated a list as a result. + if !types.IsUnknownOrError(res) && buildingList { + if _, ok := res.(traits.MutableLister); ok { + res = res.(traits.MutableLister).ToImmutableList() + } + } + return res +} + +// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation +// plan via decorators. + +// evalSetMembership is an Interpretable implementation which tests whether an input value +// exists within the set of map keys used to model a set. +type evalSetMembership struct { + inst Interpretable + arg Interpretable + valueSet map[ref.Val]ref.Val +} + +// ID implements the Interpretable interface method. +func (e *evalSetMembership) ID() int64 { + return e.inst.ID() +} + +// Eval implements the Interpretable interface method. +func (e *evalSetMembership) Eval(ctx Activation) ref.Val { + val := e.arg.Eval(ctx) + if types.IsUnknownOrError(val) { + return val + } + if ret, found := e.valueSet[val]; found { + return ret + } + return types.False +} + +// evalWatch is an Interpretable implementation that wraps the execution of a given +// expression so that it may observe the computed value and send it to an observer. +type evalWatch struct { + Interpretable + observer EvalObserver +} + +// Eval implements the Interpretable interface method. +func (e *evalWatch) Eval(ctx Activation) ref.Val { + val := e.Interpretable.Eval(ctx) + e.observer(e.ID(), e.Interpretable, val) + return val +} + +// evalWatchAttr describes a watcher of an InterpretableAttribute Interpretable. +// +// Since the watcher may be selected against at a later stage in program planning, the watcher +// must implement the InterpretableAttribute interface by proxy. 
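// Comprehension macros (map, filter, exists, all) compile down to fold
// instructions like evalFold above. A minimal sketch using the public cel
// package; when the accumulator starts as an empty list, the fold builds the
// result through the mutable-list fast path and freezes it on completion:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv()
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`[1, 2, 3].map(i, i * 2).filter(v, v > 2).size()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	fmt.Println(out, err) // 2 <nil>
}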
+type evalWatchAttr struct { + InterpretableAttribute + observer EvalObserver +} + +// AddQualifier creates a wrapper over the incoming qualifier which observes the qualification +// result. +func (e *evalWatchAttr) AddQualifier(q Qualifier) (Attribute, error) { + switch qual := q.(type) { + // By default, the qualifier is either a constant or an attribute + // There may be some custom cases where the attribute is neither. + case ConstantQualifier: + // Expose a method to test whether the qualifier matches the input pattern. + q = &evalWatchConstQual{ + ConstantQualifier: qual, + observer: e.observer, + adapter: e.Adapter(), + } + case *evalWatchAttr: + // Unwrap the evalWatchAttr since the observation will be applied during Qualify or + // QualifyIfPresent rather than Eval. + q = &evalWatchAttrQual{ + Attribute: qual.InterpretableAttribute, + observer: e.observer, + adapter: e.Adapter(), + } + case Attribute: + // Expose methods which intercept the qualification prior to being applied as a qualifier. + // Using this interface ensures that the qualifier is converted to a constant value one + // time during attribute pattern matching as the method embeds the Attribute interface + // needed to trip the conversion to a constant. + q = &evalWatchAttrQual{ + Attribute: qual, + observer: e.observer, + adapter: e.Adapter(), + } + default: + // This is likely a custom qualifier type. + q = &evalWatchQual{ + Qualifier: qual, + observer: e.observer, + adapter: e.Adapter(), + } + } + _, err := e.InterpretableAttribute.AddQualifier(q) + return e, err +} + +// Eval implements the Interpretable interface method. +func (e *evalWatchAttr) Eval(vars Activation) ref.Val { + val := e.InterpretableAttribute.Eval(vars) + e.observer(e.ID(), e.InterpretableAttribute, val) + return val +} + +// evalWatchConstQual observes the qualification of an object using a constant boolean, int, +// string, or uint. +type evalWatchConstQual struct { + ConstantQualifier + observer EvalObserver + adapter types.Adapter +} + +// Qualify observes the qualification of a object via a constant boolean, int, string, or uint. +func (e *evalWatchConstQual) Qualify(vars Activation, obj any) (any, error) { + out, err := e.ConstantQualifier.Qualify(vars, obj) + var val ref.Val + if err != nil { + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) + } else { + val = e.adapter.NativeToValue(out) + } + e.observer(e.ID(), e.ConstantQualifier, val) + return out, err +} + +// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present. +func (e *evalWatchConstQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + out, present, err := e.ConstantQualifier.QualifyIfPresent(vars, obj, presenceOnly) + var val ref.Val + if err != nil { + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) + } else if out != nil { + val = e.adapter.NativeToValue(out) + } else if presenceOnly { + val = types.Bool(present) + } + if present || presenceOnly { + e.observer(e.ID(), e.ConstantQualifier, val) + } + return out, present, err +} + +// QualifierValueEquals tests whether the incoming value is equal to the qualifying constant. +func (e *evalWatchConstQual) QualifierValueEquals(value any) bool { + qve, ok := e.ConstantQualifier.(qualifierValueEquator) + return ok && qve.QualifierValueEquals(value) +} + +// evalWatchAttrQual observes the qualification of an object by a value computed at runtime. 
+type evalWatchAttrQual struct { + Attribute + observer EvalObserver + adapter ref.TypeAdapter +} + +// Qualify observes the qualification of a object via a value computed at runtime. +func (e *evalWatchAttrQual) Qualify(vars Activation, obj any) (any, error) { + out, err := e.Attribute.Qualify(vars, obj) + var val ref.Val + if err != nil { + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) + } else { + val = e.adapter.NativeToValue(out) + } + e.observer(e.ID(), e.Attribute, val) + return out, err +} + +// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present. +func (e *evalWatchAttrQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + out, present, err := e.Attribute.QualifyIfPresent(vars, obj, presenceOnly) + var val ref.Val + if err != nil { + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) + } else if out != nil { + val = e.adapter.NativeToValue(out) + } else if presenceOnly { + val = types.Bool(present) + } + if present || presenceOnly { + e.observer(e.ID(), e.Attribute, val) + } + return out, present, err +} + +// evalWatchQual observes the qualification of an object by a value computed at runtime. +type evalWatchQual struct { + Qualifier + observer EvalObserver + adapter types.Adapter +} + +// Qualify observes the qualification of a object via a value computed at runtime. +func (e *evalWatchQual) Qualify(vars Activation, obj any) (any, error) { + out, err := e.Qualifier.Qualify(vars, obj) + var val ref.Val + if err != nil { + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) + } else { + val = e.adapter.NativeToValue(out) + } + e.observer(e.ID(), e.Qualifier, val) + return out, err +} + +// QualifyIfPresent conditionally qualifies the variable and only records a value if one is present. +func (e *evalWatchQual) QualifyIfPresent(vars Activation, obj any, presenceOnly bool) (any, bool, error) { + out, present, err := e.Qualifier.QualifyIfPresent(vars, obj, presenceOnly) + var val ref.Val + if err != nil { + val = types.LabelErrNode(e.ID(), types.WrapErr(err)) + } else if out != nil { + val = e.adapter.NativeToValue(out) + } else if presenceOnly { + val = types.Bool(present) + } + if present || presenceOnly { + e.observer(e.ID(), e.Qualifier, val) + } + return out, present, err +} + +// evalWatchConst describes a watcher of an instConst Interpretable. +type evalWatchConst struct { + InterpretableConst + observer EvalObserver +} + +// Eval implements the Interpretable interface method. +func (e *evalWatchConst) Eval(vars Activation) ref.Val { + val := e.Value() + e.observer(e.ID(), e.InterpretableConst, val) + return val +} + +// evalExhaustiveOr is just like evalOr, but does not short-circuit argument evaluation. +type evalExhaustiveOr struct { + id int64 + terms []Interpretable +} + +// ID implements the Interpretable interface method. +func (or *evalExhaustiveOr) ID() int64 { + return or.id +} + +// Eval implements the Interpretable interface method. 
+func (or *evalExhaustiveOr) Eval(ctx Activation) ref.Val {
+	var err ref.Val = nil
+	var unk *types.Unknown
+	isTrue := false
+	for _, term := range or.terms {
+		val := term.Eval(ctx)
+		boolVal, ok := val.(types.Bool)
+		// flag the result as true
+		if ok && boolVal == types.True {
+			isTrue = true
+		}
+		if !ok && !isTrue {
+			isUnk := false
+			unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+			if !isUnk && err == nil {
+				if types.IsError(val) {
+					err = val
+				} else {
+					err = types.MaybeNoSuchOverloadErr(val)
+				}
+			}
+		}
+	}
+	if isTrue {
+		return types.True
+	}
+	if unk != nil {
+		return unk
+	}
+	if err != nil {
+		return err
+	}
+	return types.False
+}
+
+// evalExhaustiveAnd is just like evalAnd, but does not short-circuit argument evaluation.
+type evalExhaustiveAnd struct {
+	id    int64
+	terms []Interpretable
+}
+
+// ID implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) ID() int64 {
+	return and.id
+}
+
+// Eval implements the Interpretable interface method.
+func (and *evalExhaustiveAnd) Eval(ctx Activation) ref.Val {
+	var err ref.Val = nil
+	var unk *types.Unknown
+	isFalse := false
+	for _, term := range and.terms {
+		val := term.Eval(ctx)
+		boolVal, ok := val.(types.Bool)
+		// flag the result as false
+		if ok && boolVal == types.False {
+			isFalse = true
+		}
+		if !ok && !isFalse {
+			isUnk := false
+			unk, isUnk = types.MaybeMergeUnknowns(val, unk)
+			if !isUnk && err == nil {
+				if types.IsError(val) {
+					err = val
+				} else {
+					err = types.MaybeNoSuchOverloadErr(val)
+				}
+			}
+		}
+	}
+	if isFalse {
+		return types.False
+	}
+	if unk != nil {
+		return unk
+	}
+	if err != nil {
+		return err
+	}
+	return types.True
+}
+
+// evalExhaustiveConditional is like evalConditional, but does not short-circuit argument
+// evaluation.
+type evalExhaustiveConditional struct {
+	id      int64
+	adapter types.Adapter
+	attr    *conditionalAttribute
+}
+
+// ID implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) ID() int64 {
+	return cond.id
+}
+
+// Eval implements the Interpretable interface method.
+func (cond *evalExhaustiveConditional) Eval(ctx Activation) ref.Val {
+	cVal := cond.attr.expr.Eval(ctx)
+	tVal, tErr := cond.attr.truthy.Resolve(ctx)
+	fVal, fErr := cond.attr.falsy.Resolve(ctx)
+	cBool, ok := cVal.(types.Bool)
+	if !ok {
+		return types.ValOrErr(cVal, "no such overload")
+	}
+	if cBool {
+		if tErr != nil {
+			return types.LabelErrNode(cond.id, types.WrapErr(tErr))
+		}
+		return cond.adapter.NativeToValue(tVal)
+	}
+	if fErr != nil {
+		return types.LabelErrNode(cond.id, types.WrapErr(fErr))
+	}
+	return cond.adapter.NativeToValue(fVal)
+}
+
+// evalAttr evaluates an Attribute value.
+type evalAttr struct {
+	adapter  types.Adapter
+	attr     Attribute
+	optional bool
+}
+
+var _ InterpretableAttribute = &evalAttr{}
+
+// ID of the attribute instruction.
+func (a *evalAttr) ID() int64 {
+	return a.attr.ID()
+}
+
+// AddQualifier implements the InterpretableAttribute interface method.
+func (a *evalAttr) AddQualifier(qual Qualifier) (Attribute, error) {
+	attr, err := a.attr.AddQualifier(qual)
+	a.attr = attr
+	return attr, err
+}
+
+// Attr implements the InterpretableAttribute interface method.
+func (a *evalAttr) Attr() Attribute {
+	return a.attr
+}
+
+// Adapter implements the InterpretableAttribute interface method.
+func (a *evalAttr) Adapter() types.Adapter {
+	return a.adapter
+}
+
+// Eval implements the Interpretable interface method.
+func (a *evalAttr) Eval(ctx Activation) ref.Val { + v, err := a.attr.Resolve(ctx) + if err != nil { + return types.LabelErrNode(a.ID(), types.WrapErr(err)) + } + return a.adapter.NativeToValue(v) +} + +// Qualify proxies to the Attribute's Qualify method. +func (a *evalAttr) Qualify(ctx Activation, obj any) (any, error) { + return a.attr.Qualify(ctx, obj) +} + +// QualifyIfPresent proxies to the Attribute's QualifyIfPresent method. +func (a *evalAttr) QualifyIfPresent(ctx Activation, obj any, presenceOnly bool) (any, bool, error) { + return a.attr.QualifyIfPresent(ctx, obj, presenceOnly) +} + +func (a *evalAttr) IsOptional() bool { + return a.optional +} + +// Resolve proxies to the Attribute's Resolve method. +func (a *evalAttr) Resolve(ctx Activation) (any, error) { + return a.attr.Resolve(ctx) +} + +type evalWatchConstructor struct { + constructor InterpretableConstructor + observer EvalObserver +} + +// InitVals implements the InterpretableConstructor InitVals function. +func (c *evalWatchConstructor) InitVals() []Interpretable { + return c.constructor.InitVals() +} + +// Type implements the InterpretableConstructor Type function. +func (c *evalWatchConstructor) Type() ref.Type { + return c.constructor.Type() +} + +// ID implements the Interpretable ID function. +func (c *evalWatchConstructor) ID() int64 { + return c.constructor.ID() +} + +// Eval implements the Interpretable Eval function. +func (c *evalWatchConstructor) Eval(ctx Activation) ref.Val { + val := c.constructor.Eval(ctx) + c.observer(c.ID(), c.constructor, val) + return val +} + +func invalidOptionalEntryInit(field any, value ref.Val) ref.Val { + return types.NewErr("cannot initialize optional entry '%v' from non-optional value %v", field, value) +} + +func invalidOptionalElementInit(value ref.Val) ref.Val { + return types.NewErr("cannot initialize optional list element from non-optional value %v", value) +} diff --git a/vendor/github.com/google/cel-go/interpreter/interpreter.go b/vendor/github.com/google/cel-go/interpreter/interpreter.go new file mode 100644 index 00000000..0aca74d8 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/interpreter.go @@ -0,0 +1,185 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package interpreter provides functions to evaluate parsed expressions with +// the option to augment the evaluation with inputs and functions supplied at +// evaluation time. +package interpreter + +import ( + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// Interpreter generates a new Interpretable from a checked or unchecked expression. +type Interpreter interface { + // NewInterpretable creates an Interpretable from a checked expression and an + // optional list of InterpretableDecorator values. 
+ NewInterpretable(exprAST *ast.AST, decorators ...InterpretableDecorator) (Interpretable, error) +} + +// EvalObserver is a functional interface that accepts an expression id and an observed value. +// The id identifies the expression that was evaluated, the programStep is the Interpretable or Qualifier that +// was evaluated and value is the result of the evaluation. +type EvalObserver func(id int64, programStep any, value ref.Val) + +// Observe constructs a decorator that calls all the provided observers in order after evaluating each Interpretable +// or Qualifier during program evaluation. +func Observe(observers ...EvalObserver) InterpretableDecorator { + if len(observers) == 1 { + return decObserveEval(observers[0]) + } + observeFn := func(id int64, programStep any, val ref.Val) { + for _, observer := range observers { + observer(id, programStep, val) + } + } + return decObserveEval(observeFn) +} + +// EvalCancelledError represents a cancelled program evaluation operation. +type EvalCancelledError struct { + Message string + // Type identifies the cause of the cancellation. + Cause CancellationCause +} + +func (e EvalCancelledError) Error() string { + return e.Message +} + +// CancellationCause enumerates the ways a program evaluation operation can be cancelled. +type CancellationCause int + +const ( + // ContextCancelled indicates that the operation was cancelled in response to a Golang context cancellation. + ContextCancelled CancellationCause = iota + + // CostLimitExceeded indicates that the operation was cancelled in response to the actual cost limit being + // exceeded. + CostLimitExceeded +) + +// TODO: Replace all usages of TrackState with EvalStateObserver + +// TrackState decorates each expression node with an observer which records the value +// associated with the given expression id. EvalState must be provided to the decorator. +// This decorator is not thread-safe, and the EvalState must be reset between Eval() +// calls. +// DEPRECATED: Please use EvalStateObserver instead. It composes gracefully with additional observers. +func TrackState(state EvalState) InterpretableDecorator { + return Observe(EvalStateObserver(state)) +} + +// EvalStateObserver provides an observer which records the value +// associated with the given expression id. EvalState must be provided to the observer. +// This decorator is not thread-safe, and the EvalState must be reset between Eval() +// calls. +func EvalStateObserver(state EvalState) EvalObserver { + return func(id int64, programStep any, val ref.Val) { + state.SetValue(id, val) + } +} + +// ExhaustiveEval replaces operations that short-circuit with versions that evaluate +// expressions and couples this behavior with the TrackState() decorator to provide +// insight into the evaluation state of the entire expression. EvalState must be +// provided to the decorator. This decorator is not thread-safe, and the EvalState +// must be reset between Eval() calls. +func ExhaustiveEval() InterpretableDecorator { + ex := decDisableShortcircuits() + return func(i Interpretable) (Interpretable, error) { + return ex(i) + } +} + +// InterruptableEval annotates comprehension loops with information that indicates they +// should check the `#interrupted` state within a custom Activation. +// +// The custom activation is currently managed higher up in the stack within the 'cel' package +// and should not require any custom support on behalf of callers. 
+func InterruptableEval() InterpretableDecorator {
+	return decInterruptFolds()
+}
+
+// Optimize will pre-compute operations such as list and map construction and optimize
+// call arguments to set membership tests. The set of optimizations will increase over time.
+func Optimize() InterpretableDecorator {
+	return decOptimize()
+}
+
+// RegexOptimization provides a way to replace an InterpretableCall for a regex function when the
+// RegexIndex argument is a string constant. Typically, the Factory would compile the regex pattern at
+// RegexIndex and report any errors (at program creation time) and then use the compiled regex for
+// all regex function invocations.
+type RegexOptimization struct {
+	// Function is the name of the function to optimize.
+	Function string
+	// OverloadID is the ID of the overload to optimize.
+	OverloadID string
+	// RegexIndex is the index position of the regex pattern argument. Only calls to the function where this argument is
+	// a string constant will be delegated to this optimizer.
+	RegexIndex int
+	// Factory constructs a replacement InterpretableCall node that optimizes the regex function call. Factory is
+	// provided with the unoptimized regex call and the string constant at the RegexIndex argument.
+	// The Factory may compile the regex for use across all invocations of the call, return any errors and
+	// return an interpreter.NewCall with the desired regex optimized function impl.
+	Factory func(call InterpretableCall, regexPattern string) (InterpretableCall, error)
+}
+
+// CompileRegexConstants compiles regex pattern string constants at program creation time and reports any regex pattern
+// compile errors.
+func CompileRegexConstants(regexOptimizations ...*RegexOptimization) InterpretableDecorator {
+	return decRegexOptimizer(regexOptimizations...)
+}
+
+type exprInterpreter struct {
+	dispatcher  Dispatcher
+	container   *containers.Container
+	provider    types.Provider
+	adapter     types.Adapter
+	attrFactory AttributeFactory
+}
+
+// NewInterpreter builds an Interpreter from a Dispatcher and TypeProvider which will be used
+// throughout the Eval of all Interpretable instances generated from it.
+func NewInterpreter(dispatcher Dispatcher,
+	container *containers.Container,
+	provider types.Provider,
+	adapter types.Adapter,
+	attrFactory AttributeFactory) Interpreter {
+	return &exprInterpreter{
+		dispatcher:  dispatcher,
+		container:   container,
+		provider:    provider,
+		adapter:     adapter,
+		attrFactory: attrFactory}
+}
+
+// NewInterpretable implements the Interpreter interface method.
+func (i *exprInterpreter) NewInterpretable(
+	checked *ast.AST,
+	decorators ...InterpretableDecorator) (Interpretable, error) {
+	p := newPlanner(
+		i.dispatcher,
+		i.provider,
+		i.adapter,
+		i.attrFactory,
+		i.container,
+		checked,
+		decorators...)
+	return p.Plan(checked.Expr())
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/optimizations.go b/vendor/github.com/google/cel-go/interpreter/optimizations.go
new file mode 100644
index 00000000..2fc87e69
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/optimizations.go
@@ -0,0 +1,46 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "regexp" + + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// MatchesRegexOptimization optimizes the 'matches' standard library function by compiling the regex pattern and +// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function +// call invocations. +var MatchesRegexOptimization = &RegexOptimization{ + Function: "matches", + RegexIndex: 1, + Factory: func(call InterpretableCall, regexPattern string) (InterpretableCall, error) { + compiledRegex, err := regexp.Compile(regexPattern) + if err != nil { + return nil, err + } + return NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(values ...ref.Val) ref.Val { + if len(values) != 2 { + return types.NoSuchOverloadErr() + } + in, ok := values[0].Value().(string) + if !ok { + return types.NoSuchOverloadErr() + } + return types.Bool(compiledRegex.MatchString(in)) + }), nil + }, +} diff --git a/vendor/github.com/google/cel-go/interpreter/planner.go b/vendor/github.com/google/cel-go/interpreter/planner.go new file mode 100644 index 00000000..cf371f95 --- /dev/null +++ b/vendor/github.com/google/cel-go/interpreter/planner.go @@ -0,0 +1,756 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package interpreter + +import ( + "fmt" + "strings" + + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/containers" + "github.com/google/cel-go/common/functions" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types" +) + +// interpretablePlanner creates an Interpretable evaluation plan from a proto Expr value. +type interpretablePlanner interface { + // Plan generates an Interpretable value (or error) from the input proto Expr. + Plan(expr ast.Expr) (Interpretable, error) +} + +// newPlanner creates an interpretablePlanner which references a Dispatcher, TypeProvider, +// TypeAdapter, Container, and CheckedExpr value. These pieces of data are used to resolve +// functions, types, and namespaced identifiers at plan time rather than at runtime since +// it only needs to be done once and may be semi-expensive to compute. 
+func newPlanner(disp Dispatcher, + provider types.Provider, + adapter types.Adapter, + attrFactory AttributeFactory, + cont *containers.Container, + exprAST *ast.AST, + decorators ...InterpretableDecorator) interpretablePlanner { + return &planner{ + disp: disp, + provider: provider, + adapter: adapter, + attrFactory: attrFactory, + container: cont, + refMap: exprAST.ReferenceMap(), + typeMap: exprAST.TypeMap(), + decorators: decorators, + } +} + +// planner is an implementation of the interpretablePlanner interface. +type planner struct { + disp Dispatcher + provider types.Provider + adapter types.Adapter + attrFactory AttributeFactory + container *containers.Container + refMap map[int64]*ast.ReferenceInfo + typeMap map[int64]*types.Type + decorators []InterpretableDecorator +} + +// Plan implements the interpretablePlanner interface. This implementation of the Plan method also +// applies decorators to each Interpretable generated as part of the overall plan. Decorators are +// useful for layering functionality into the evaluation that is not natively understood by CEL, +// such as state-tracking, expression re-write, and possibly efficient thread-safe memoization of +// repeated expressions. +func (p *planner) Plan(expr ast.Expr) (Interpretable, error) { + switch expr.Kind() { + case ast.CallKind: + return p.decorate(p.planCall(expr)) + case ast.IdentKind: + return p.decorate(p.planIdent(expr)) + case ast.LiteralKind: + return p.decorate(p.planConst(expr)) + case ast.SelectKind: + return p.decorate(p.planSelect(expr)) + case ast.ListKind: + return p.decorate(p.planCreateList(expr)) + case ast.MapKind: + return p.decorate(p.planCreateMap(expr)) + case ast.StructKind: + return p.decorate(p.planCreateStruct(expr)) + case ast.ComprehensionKind: + return p.decorate(p.planComprehension(expr)) + } + return nil, fmt.Errorf("unsupported expr: %v", expr) +} + +// decorate applies the InterpretableDecorator functions to the given Interpretable. +// Both the Interpretable and error generated by a Plan step are accepted as arguments +// for convenience. +func (p *planner) decorate(i Interpretable, err error) (Interpretable, error) { + if err != nil { + return nil, err + } + for _, dec := range p.decorators { + i, err = dec(i) + if err != nil { + return nil, err + } + } + return i, nil +} + +// planIdent creates an Interpretable that resolves an identifier from an Activation. +func (p *planner) planIdent(expr ast.Expr) (Interpretable, error) { + // Establish whether the identifier is in the reference map. + if identRef, found := p.refMap[expr.ID()]; found { + return p.planCheckedIdent(expr.ID(), identRef) + } + // Create the possible attribute list for the unresolved reference. + ident := expr.AsIdent() + return &evalAttr{ + adapter: p.adapter, + attr: p.attrFactory.MaybeAttribute(expr.ID(), ident), + }, nil +} + +func (p *planner) planCheckedIdent(id int64, identRef *ast.ReferenceInfo) (Interpretable, error) { + // Plan a constant reference if this is the case for this simple identifier. + if identRef.Value != nil { + return NewConstValue(id, identRef.Value), nil + } + + // Check to see whether the type map indicates this is a type name. All types should be + // registered with the provider. 
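+	// Example (illustrative, not part of the vendored source): for an expression
+	// such as `type(1) == int`, the identifier `int` is checked as a type, so it
+	// is planned below as a constant type value resolved via provider.FindIdent
+	// rather than as a runtime attribute lookup.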
+ cType := p.typeMap[id] + if cType.Kind() == types.TypeKind { + cVal, found := p.provider.FindIdent(identRef.Name) + if !found { + return nil, fmt.Errorf("reference to undefined type: %s", identRef.Name) + } + return NewConstValue(id, cVal), nil + } + + // Otherwise, return the attribute for the resolved identifier name. + return &evalAttr{ + adapter: p.adapter, + attr: p.attrFactory.AbsoluteAttribute(id, identRef.Name), + }, nil +} + +// planSelect creates an Interpretable with either: +// +// a) selects a field from a map or proto. +// b) creates a field presence test for a select within a has() macro. +// c) resolves the select expression to a namespaced identifier. +func (p *planner) planSelect(expr ast.Expr) (Interpretable, error) { + // If the Select id appears in the reference map from the CheckedExpr proto then it is either + // a namespaced identifier or enum value. + if identRef, found := p.refMap[expr.ID()]; found { + return p.planCheckedIdent(expr.ID(), identRef) + } + + sel := expr.AsSelect() + // Plan the operand evaluation. + op, err := p.Plan(sel.Operand()) + if err != nil { + return nil, err + } + opType := p.typeMap[sel.Operand().ID()] + + // If the Select was marked TestOnly, this is a presence test. + // + // Note: presence tests are defined for structured (e.g. proto) and dynamic values (map, json) + // as follows: + // - True if the object field has a non-default value, e.g. obj.str != "" + // - True if the dynamic value has the field defined, e.g. key in map + // + // However, presence tests are not defined for qualified identifier names with primitive types. + // If a string named 'a.b.c' is declared in the environment and referenced within `has(a.b.c)`, + // it is not clear whether has should error or follow the convention defined for structured + // values. + + // Establish the attribute reference. + attr, isAttr := op.(InterpretableAttribute) + if !isAttr { + attr, err = p.relativeAttr(op.ID(), op, false) + if err != nil { + return nil, err + } + } + + // Build a qualifier for the attribute. + qual, err := p.attrFactory.NewQualifier(opType, expr.ID(), sel.FieldName(), false) + if err != nil { + return nil, err + } + // Modify the attribute to be test-only. + if sel.IsTestOnly() { + attr = &evalTestOnly{ + id: expr.ID(), + InterpretableAttribute: attr, + } + } + // Append the qualifier on the attribute. + _, err = attr.AddQualifier(qual) + return attr, err +} + +// planCall creates a callable Interpretable while specializing for common functions and invocation +// patterns. Specifically, conditional operators &&, ||, ?:, and (in)equality functions result in +// optimized Interpretable values. +func (p *planner) planCall(expr ast.Expr) (Interpretable, error) { + call := expr.AsCall() + target, fnName, oName := p.resolveFunction(expr) + argCount := len(call.Args()) + var offset int + if target != nil { + argCount++ + offset++ + } + + args := make([]Interpretable, argCount) + if target != nil { + arg, err := p.Plan(target) + if err != nil { + return nil, err + } + args[0] = arg + } + for i, argExpr := range call.Args() { + arg, err := p.Plan(argExpr) + if err != nil { + return nil, err + } + args[i+offset] = arg + } + + // Generate specialized Interpretable operators by function name if possible. 
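+	// Example (illustrative, not part of the vendored source): `a || b` resolves
+	// fnName to operators.LogicalOr and becomes an evalOr over the planned terms,
+	// while `a == b` becomes an evalEq; only functions outside this set fall
+	// through to the generic overload dispatch that follows the switch.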
+ switch fnName { + case operators.LogicalAnd: + return p.planCallLogicalAnd(expr, args) + case operators.LogicalOr: + return p.planCallLogicalOr(expr, args) + case operators.Conditional: + return p.planCallConditional(expr, args) + case operators.Equals: + return p.planCallEqual(expr, args) + case operators.NotEquals: + return p.planCallNotEqual(expr, args) + case operators.Index: + return p.planCallIndex(expr, args, false) + case operators.OptSelect, operators.OptIndex: + return p.planCallIndex(expr, args, true) + } + + // Otherwise, generate Interpretable calls specialized by argument count. + // Try to find the specific function by overload id. + var fnDef *functions.Overload + if oName != "" { + fnDef, _ = p.disp.FindOverload(oName) + } + // If the overload id couldn't resolve the function, try the simple function name. + if fnDef == nil { + fnDef, _ = p.disp.FindOverload(fnName) + } + switch argCount { + case 0: + return p.planCallZero(expr, fnName, oName, fnDef) + case 1: + // If the FunctionOp has been used, then use it as it may exist for the purposes + // of dynamic dispatch within a singleton function implementation. + if fnDef != nil && fnDef.Unary == nil && fnDef.Function != nil { + return p.planCallVarArgs(expr, fnName, oName, fnDef, args) + } + return p.planCallUnary(expr, fnName, oName, fnDef, args) + case 2: + // If the FunctionOp has been used, then use it as it may exist for the purposes + // of dynamic dispatch within a singleton function implementation. + if fnDef != nil && fnDef.Binary == nil && fnDef.Function != nil { + return p.planCallVarArgs(expr, fnName, oName, fnDef, args) + } + return p.planCallBinary(expr, fnName, oName, fnDef, args) + default: + return p.planCallVarArgs(expr, fnName, oName, fnDef, args) + } +} + +// planCallZero generates a zero-arity callable Interpretable. +func (p *planner) planCallZero(expr ast.Expr, + function string, + overload string, + impl *functions.Overload) (Interpretable, error) { + if impl == nil || impl.Function == nil { + return nil, fmt.Errorf("no such overload: %s()", function) + } + return &evalZeroArity{ + id: expr.ID(), + function: function, + overload: overload, + impl: impl.Function, + }, nil +} + +// planCallUnary generates a unary callable Interpretable. +func (p *planner) planCallUnary(expr ast.Expr, + function string, + overload string, + impl *functions.Overload, + args []Interpretable) (Interpretable, error) { + var fn functions.UnaryOp + var trait int + var nonStrict bool + if impl != nil { + if impl.Unary == nil { + return nil, fmt.Errorf("no such overload: %s(arg)", function) + } + fn = impl.Unary + trait = impl.OperandTrait + nonStrict = impl.NonStrict + } + return &evalUnary{ + id: expr.ID(), + function: function, + overload: overload, + arg: args[0], + trait: trait, + impl: fn, + nonStrict: nonStrict, + }, nil +} + +// planCallBinary generates a binary callable Interpretable. 
+func (p *planner) planCallBinary(expr ast.Expr, + function string, + overload string, + impl *functions.Overload, + args []Interpretable) (Interpretable, error) { + var fn functions.BinaryOp + var trait int + var nonStrict bool + if impl != nil { + if impl.Binary == nil { + return nil, fmt.Errorf("no such overload: %s(lhs, rhs)", function) + } + fn = impl.Binary + trait = impl.OperandTrait + nonStrict = impl.NonStrict + } + return &evalBinary{ + id: expr.ID(), + function: function, + overload: overload, + lhs: args[0], + rhs: args[1], + trait: trait, + impl: fn, + nonStrict: nonStrict, + }, nil +} + +// planCallVarArgs generates a variable argument callable Interpretable. +func (p *planner) planCallVarArgs(expr ast.Expr, + function string, + overload string, + impl *functions.Overload, + args []Interpretable) (Interpretable, error) { + var fn functions.FunctionOp + var trait int + var nonStrict bool + if impl != nil { + if impl.Function == nil { + return nil, fmt.Errorf("no such overload: %s(...)", function) + } + fn = impl.Function + trait = impl.OperandTrait + nonStrict = impl.NonStrict + } + return &evalVarArgs{ + id: expr.ID(), + function: function, + overload: overload, + args: args, + trait: trait, + impl: fn, + nonStrict: nonStrict, + }, nil +} + +// planCallEqual generates an equals (==) Interpretable. +func (p *planner) planCallEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalEq{ + id: expr.ID(), + lhs: args[0], + rhs: args[1], + }, nil +} + +// planCallNotEqual generates a not equals (!=) Interpretable. +func (p *planner) planCallNotEqual(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalNe{ + id: expr.ID(), + lhs: args[0], + rhs: args[1], + }, nil +} + +// planCallLogicalAnd generates a logical and (&&) Interpretable. +func (p *planner) planCallLogicalAnd(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalAnd{ + id: expr.ID(), + terms: args, + }, nil +} + +// planCallLogicalOr generates a logical or (||) Interpretable. +func (p *planner) planCallLogicalOr(expr ast.Expr, args []Interpretable) (Interpretable, error) { + return &evalOr{ + id: expr.ID(), + terms: args, + }, nil +} + +// planCallConditional generates a conditional / ternary (c ? t : f) Interpretable. +func (p *planner) planCallConditional(expr ast.Expr, args []Interpretable) (Interpretable, error) { + cond := args[0] + t := args[1] + var tAttr Attribute + truthyAttr, isTruthyAttr := t.(InterpretableAttribute) + if isTruthyAttr { + tAttr = truthyAttr.Attr() + } else { + tAttr = p.attrFactory.RelativeAttribute(t.ID(), t) + } + + f := args[2] + var fAttr Attribute + falsyAttr, isFalsyAttr := f.(InterpretableAttribute) + if isFalsyAttr { + fAttr = falsyAttr.Attr() + } else { + fAttr = p.attrFactory.RelativeAttribute(f.ID(), f) + } + + return &evalAttr{ + adapter: p.adapter, + attr: p.attrFactory.ConditionalAttribute(expr.ID(), cond, tAttr, fAttr), + }, nil +} + +// planCallIndex either extends an attribute with the argument to the index operation, or creates +// a relative attribute based on the return of a function call or operation. +func (p *planner) planCallIndex(expr ast.Expr, args []Interpretable, optional bool) (Interpretable, error) { + op := args[0] + ind := args[1] + opType := p.typeMap[op.ID()] + + // Establish the attribute reference. 
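+	// Example (illustrative, not part of the vendored source): for `m["key"]`,
+	// `op` is usually already an InterpretableAttribute, so the constant "key" is
+	// appended below as a qualifier; for `f()["key"]`, the call result is first
+	// wrapped via relativeAttr so that a qualifier can still be applied to it.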
+	var err error
+	attr, isAttr := op.(InterpretableAttribute)
+	if !isAttr {
+		attr, err = p.relativeAttr(op.ID(), op, false)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Construct the qualifier type.
+	var qual Qualifier
+	switch ind := ind.(type) {
+	case InterpretableConst:
+		qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind.Value(), optional)
+	case InterpretableAttribute:
+		qual, err = p.attrFactory.NewQualifier(opType, expr.ID(), ind, optional)
+	default:
+		qual, err = p.relativeAttr(expr.ID(), ind, optional)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// Add the qualifier to the attribute
+	_, err = attr.AddQualifier(qual)
+	return attr, err
+}
+
+// planCreateList generates a list construction Interpretable.
+func (p *planner) planCreateList(expr ast.Expr) (Interpretable, error) {
+	list := expr.AsList()
+	optionalIndices := list.OptionalIndices()
+	elements := list.Elements()
+	optionals := make([]bool, len(elements))
+	for _, index := range optionalIndices {
+		if index < 0 || index >= int32(len(elements)) {
+			return nil, fmt.Errorf("optional index %d out of element bounds [0, %d]", index, len(elements))
+		}
+		optionals[index] = true
+	}
+	elems := make([]Interpretable, len(elements))
+	for i, elem := range elements {
+		elemVal, err := p.Plan(elem)
+		if err != nil {
+			return nil, err
+		}
+		elems[i] = elemVal
+	}
+	return &evalList{
+		id:           expr.ID(),
+		elems:        elems,
+		optionals:    optionals,
+		hasOptionals: len(optionals) != 0,
+		adapter:      p.adapter,
+	}, nil
+}
+
+// planCreateMap generates a map construction Interpretable.
+func (p *planner) planCreateMap(expr ast.Expr) (Interpretable, error) {
+	m := expr.AsMap()
+	entries := m.Entries()
+	optionals := make([]bool, len(entries))
+	keys := make([]Interpretable, len(entries))
+	vals := make([]Interpretable, len(entries))
+	for i, e := range entries {
+		entry := e.AsMapEntry()
+		keyVal, err := p.Plan(entry.Key())
+		if err != nil {
+			return nil, err
+		}
+		keys[i] = keyVal
+
+		valVal, err := p.Plan(entry.Value())
+		if err != nil {
+			return nil, err
+		}
+		vals[i] = valVal
+		optionals[i] = entry.IsOptional()
+	}
+	return &evalMap{
+		id:           expr.ID(),
+		keys:         keys,
+		vals:         vals,
+		optionals:    optionals,
+		hasOptionals: len(optionals) != 0,
+		adapter:      p.adapter,
+	}, nil
+}
+
+// planCreateStruct generates an object construction Interpretable.
+func (p *planner) planCreateStruct(expr ast.Expr) (Interpretable, error) {
+	obj := expr.AsStruct()
+	typeName, defined := p.resolveTypeName(obj.TypeName())
+	if !defined {
+		return nil, fmt.Errorf("unknown type: %s", obj.TypeName())
+	}
+	objFields := obj.Fields()
+	optionals := make([]bool, len(objFields))
+	fields := make([]string, len(objFields))
+	vals := make([]Interpretable, len(objFields))
+	for i, f := range objFields {
+		field := f.AsStructField()
+		fields[i] = field.Name()
+		val, err := p.Plan(field.Value())
+		if err != nil {
+			return nil, err
+		}
+		vals[i] = val
+		optionals[i] = field.IsOptional()
+	}
+	return &evalObj{
+		id:           expr.ID(),
+		typeName:     typeName,
+		fields:       fields,
+		vals:         vals,
+		optionals:    optionals,
+		hasOptionals: len(optionals) != 0,
+		provider:     p.provider,
+	}, nil
+}
+
+// planComprehension generates an Interpretable fold operation.
+func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) { + fold := expr.AsComprehension() + accu, err := p.Plan(fold.AccuInit()) + if err != nil { + return nil, err + } + iterRange, err := p.Plan(fold.IterRange()) + if err != nil { + return nil, err + } + cond, err := p.Plan(fold.LoopCondition()) + if err != nil { + return nil, err + } + step, err := p.Plan(fold.LoopStep()) + if err != nil { + return nil, err + } + result, err := p.Plan(fold.Result()) + if err != nil { + return nil, err + } + return &evalFold{ + id: expr.ID(), + accuVar: fold.AccuVar(), + accu: accu, + iterVar: fold.IterVar(), + iterRange: iterRange, + cond: cond, + step: step, + result: result, + adapter: p.adapter, + }, nil +} + +// planConst generates a constant valued Interpretable. +func (p *planner) planConst(expr ast.Expr) (Interpretable, error) { + return NewConstValue(expr.ID(), expr.AsLiteral()), nil +} + +// resolveTypeName takes a qualified string constructed at parse time, applies the proto +// namespace resolution rules to it in a scan over possible matching types in the TypeProvider. +func (p *planner) resolveTypeName(typeName string) (string, bool) { + for _, qualifiedTypeName := range p.container.ResolveCandidateNames(typeName) { + if _, found := p.provider.FindStructType(qualifiedTypeName); found { + return qualifiedTypeName, true + } + } + return "", false +} + +// resolveFunction determines the call target, function name, and overload name from a given Expr +// value. +// +// The resolveFunction resolves ambiguities where a function may either be a receiver-style +// invocation or a qualified global function name. +// - The target expression may only consist of ident and select expressions. +// - The function is declared in the environment using its fully-qualified name. +// - The fully-qualified function name matches the string serialized target value. +func (p *planner) resolveFunction(expr ast.Expr) (ast.Expr, string, string) { + // Note: similar logic exists within the `checker/checker.go`. If making changes here + // please consider the impact on checker.go and consolidate implementations or mirror code + // as appropriate. + call := expr.AsCall() + var target ast.Expr = nil + if call.IsMemberFunction() { + target = call.Target() + } + fnName := call.FunctionName() + + // Checked expressions always have a reference map entry, and _should_ have the fully qualified + // function name as the fnName value. + oRef, hasOverload := p.refMap[expr.ID()] + if hasOverload { + if len(oRef.OverloadIDs) == 1 { + return target, fnName, oRef.OverloadIDs[0] + } + // Note, this namespaced function name will not appear as a fully qualified name in ASTs + // built and stored before cel-go v0.5.0; however, this functionality did not work at all + // before the v0.5.0 release. + return target, fnName, "" + } + + // Parse-only expressions need to handle the same logic as is normally performed at check time, + // but with potentially much less information. The only reliable source of information about + // which functions are configured is the dispatcher. + if target == nil { + // If the user has a parse-only expression, then it should have been configured as such in + // the interpreter dispatcher as it may have been omitted from the checker environment. 
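+		// Example (illustrative, not part of the vendored source): in a container
+		// `a.b`, a parse-only call to `f()` yields the candidate names `a.b.f`,
+		// `a.f`, and `f`; the first candidate with an overload registered in the
+		// dispatcher is selected below.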
+		for _, qualifiedName := range p.container.ResolveCandidateNames(fnName) {
+			_, found := p.disp.FindOverload(qualifiedName)
+			if found {
+				return nil, qualifiedName, ""
+			}
+		}
+		// It's possible that the overload was not found, but this situation is accounted for in
+		// the planCall phase; however, the leading dot used for denoting fully-qualified
+		// namespaced identifiers must be stripped, as all declarations already use fully-qualified
+		// names. This stripping behavior is handled automatically by the ResolveCandidateNames
+		// call.
+		return target, stripLeadingDot(fnName), ""
+	}
+
+	// Handle the situation where the function target actually indicates a qualified function name.
+	qualifiedPrefix, maybeQualified := p.toQualifiedName(target)
+	if maybeQualified {
+		maybeQualifiedName := qualifiedPrefix + "." + fnName
+		for _, qualifiedName := range p.container.ResolveCandidateNames(maybeQualifiedName) {
+			_, found := p.disp.FindOverload(qualifiedName)
+			if found {
+				// Clear the target to ensure the proper arity is used for finding the
+				// implementation.
+				return nil, qualifiedName, ""
+			}
+		}
+	}
+	// In the default case, the function is exactly as it was advertised: a receiver call on
+	// an expression-based target with the given simple function name.
+	return target, fnName, ""
+}
+
+// relativeAttr indicates that the attribute in this case acts as a qualifier and as such needs to
+// be observed to ensure that its evaluation value is properly recorded for state tracking.
+func (p *planner) relativeAttr(id int64, eval Interpretable, opt bool) (InterpretableAttribute, error) {
+	eAttr, ok := eval.(InterpretableAttribute)
+	if !ok {
+		eAttr = &evalAttr{
+			adapter:  p.adapter,
+			attr:     p.attrFactory.RelativeAttribute(id, eval),
+			optional: opt,
+		}
+	}
+	// This looks like it should either decorate the new evalAttr node, or early return the InterpretableAttribute
+	decAttr, err := p.decorate(eAttr, nil)
+	if err != nil {
+		return nil, err
+	}
+	eAttr, ok = decAttr.(InterpretableAttribute)
+	if !ok {
+		return nil, fmt.Errorf("invalid attribute decoration: %v(%T)", decAttr, decAttr)
+	}
+	return eAttr, nil
+}
+
+// toQualifiedName converts an expression AST into a qualified name if possible, with a boolean
+// 'found' value that indicates if the conversion is successful.
+func (p *planner) toQualifiedName(operand ast.Expr) (string, bool) {
+	// If the type-checker identified the expression as an attribute, then it can't
+	// possibly be part of a qualified name in a namespace.
+	_, isAttr := p.refMap[operand.ID()]
+	if isAttr {
+		return "", false
+	}
+	// Since functions cannot be both namespaced and receiver functions, if the operand is not a
+	// qualified variable name, return the (possibly) qualified name given the expressions.
+	switch operand.Kind() {
+	case ast.IdentKind:
+		id := operand.AsIdent()
+		return id, true
+	case ast.SelectKind:
+		sel := operand.AsSelect()
+		// Test only expressions are not valid as qualified names.
+		if sel.IsTestOnly() {
+			return "", false
+		}
+		if qual, found := p.toQualifiedName(sel.Operand()); found {
+			return qual + "." + sel.FieldName(), true
+		}
+	}
+	return "", false
+}
+
+func stripLeadingDot(name string) string {
+	if strings.HasPrefix(name, ".") {
+		return name[1:]
+	}
+	return name
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/prune.go b/vendor/github.com/google/cel-go/interpreter/prune.go
new file mode 100644
index 00000000..410d80dc
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/prune.go
@@ -0,0 +1,543 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/overloads"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+type astPruner struct {
+	ast.ExprFactory
+	expr       ast.Expr
+	macroCalls map[int64]ast.Expr
+	state      EvalState
+	nextExprID int64
+}
+
+// TODO Consider having a separate walk of the AST that finds common
+// subexpressions. This can be called before or after constant folding to find
+// common subexpressions.
+
+// PruneAst prunes the given AST based on the given EvalState and generates a new AST.
+// The given AST is copied on write and a new AST is returned.
+// A couple of typical use cases for this interface would be:
+//
+// A)
+//  1. Evaluate the expr with some unknowns.
+//  2. If the result is unknown:
+//     a. PruneAst
+//     b. Goto 1
+//
+// Function call results which are known would be effectively cached across
+// iterations.
+//
+// B)
+//  1. Compile the expression (maybe via a service, and maybe after checking that a
+//     compiled expression does not exist in a local cache).
+//  2. Prepare the environment and the interpreter. The Activation might be empty.
+//  3. Eval the expression. This might return unknown, an error, or a concrete value.
+//  4. PruneAst
+//  5. Maybe cache the expression.
+//
+// This is effectively constant folding the expression. How the environment is
+// prepared in step 2 is flexible. For example, if the caller caches the
+// compiled and constant folded expressions, but is not willing to constant
+// fold (and thus cache results of) some external calls, then they can prepare
+// the overloads accordingly.
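+// A minimal sketch of use case A above (illustrative; `prog`, `partialVars`, and
+// `checkedAST` are assumed values, and `details.State()` follows the `cel`
+// package's EvalDetails accessor):
+//
+//	val, details, _ := prog.Eval(partialVars)
+//	if types.IsUnknown(val) {
+//		residual := PruneAst(checkedAST.Expr(), checkedAST.SourceInfo().MacroCalls(), details.State())
+//		// plan and evaluate `residual` again once more inputs become known
+//	}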
+func PruneAst(expr ast.Expr, macroCalls map[int64]ast.Expr, state EvalState) *ast.AST {
+	pruneState := NewEvalState()
+	for _, id := range state.IDs() {
+		v, _ := state.Value(id)
+		pruneState.SetValue(id, v)
+	}
+	pruner := &astPruner{
+		ExprFactory: ast.NewExprFactory(),
+		expr:        expr,
+		macroCalls:  macroCalls,
+		state:       pruneState,
+		nextExprID:  getMaxID(expr)}
+	newExpr, _ := pruner.maybePrune(expr)
+	newInfo := ast.NewSourceInfo(nil)
+	for id, call := range pruner.macroCalls {
+		newInfo.SetMacroCall(id, call)
+	}
+	return ast.NewAST(newExpr, newInfo)
+}
+
+func (p *astPruner) maybeCreateLiteral(id int64, val ref.Val) (ast.Expr, bool) {
+	switch v := val.(type) {
+	case types.Bool, types.Bytes, types.Double, types.Int, types.Null, types.String, types.Uint:
+		p.state.SetValue(id, val)
+		return p.NewLiteral(id, val), true
+	case types.Duration:
+		p.state.SetValue(id, val)
+		durationString := v.ConvertToType(types.StringType).(types.String)
+		return p.NewCall(id, overloads.TypeConvertDuration, p.NewLiteral(p.nextID(), durationString)), true
+	case types.Timestamp:
+		timestampString := v.ConvertToType(types.StringType).(types.String)
+		return p.NewCall(id, overloads.TypeConvertTimestamp, p.NewLiteral(p.nextID(), timestampString)), true
+	}
+
+	// Attempt to build a list literal.
+	if list, isList := val.(traits.Lister); isList {
+		sz := list.Size().(types.Int)
+		elemExprs := make([]ast.Expr, sz)
+		for i := types.Int(0); i < sz; i++ {
+			elem := list.Get(i)
+			if types.IsUnknownOrError(elem) {
+				return nil, false
+			}
+			elemExpr, ok := p.maybeCreateLiteral(p.nextID(), elem)
+			if !ok {
+				return nil, false
+			}
+			elemExprs[i] = elemExpr
+		}
+		p.state.SetValue(id, val)
+		return p.NewList(id, elemExprs, []int32{}), true
+	}
+
+	// Create a map literal if possible.
+	if mp, isMap := val.(traits.Mapper); isMap {
+		it := mp.Iterator()
+		entries := make([]ast.EntryExpr, mp.Size().(types.Int))
+		i := 0
+		for it.HasNext() != types.False {
+			key := it.Next()
+			val := mp.Get(key)
+			if types.IsUnknownOrError(key) || types.IsUnknownOrError(val) {
+				return nil, false
+			}
+			keyExpr, ok := p.maybeCreateLiteral(p.nextID(), key)
+			if !ok {
+				return nil, false
+			}
+			valExpr, ok := p.maybeCreateLiteral(p.nextID(), val)
+			if !ok {
+				return nil, false
+			}
+			entry := p.NewMapEntry(p.nextID(), keyExpr, valExpr, false)
+			entries[i] = entry
+			i++
+		}
+		p.state.SetValue(id, val)
+		return p.NewMap(id, entries), true
+	}
+
+	// TODO(issues/377) To construct message literals, the type provider will need to support
+	// the enumeration of the fields for a given message.
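+	// Example (illustrative, not part of the vendored source): a fully known
+	// value such as [1, 2, 3] is rebuilt above as a list literal of int literals,
+	// whereas a proto message value has no literal form here and falls through to
+	// the non-literal path below.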
+ return nil, false +} + +func (p *astPruner) maybePruneOptional(elem ast.Expr) (ast.Expr, bool) { + elemVal, found := p.value(elem.ID()) + if found && elemVal.Type() == types.OptionalType { + opt := elemVal.(*types.Optional) + if !opt.HasValue() { + return nil, true + } + if newElem, pruned := p.maybeCreateLiteral(elem.ID(), opt.GetValue()); pruned { + return newElem, true + } + } + return elem, false +} + +func (p *astPruner) maybePruneIn(node ast.Expr) (ast.Expr, bool) { + // elem in list + call := node.AsCall() + val, exists := p.maybeValue(call.Args()[1].ID()) + if !exists { + return nil, false + } + if sz, ok := val.(traits.Sizer); ok && sz.Size() == types.IntZero { + return p.maybeCreateLiteral(node.ID(), types.False) + } + return nil, false +} + +func (p *astPruner) maybePruneLogicalNot(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + arg := call.Args()[0] + val, exists := p.maybeValue(arg.ID()) + if !exists { + return nil, false + } + if b, ok := val.(types.Bool); ok { + return p.maybeCreateLiteral(node.ID(), !b) + } + return nil, false +} + +func (p *astPruner) maybePruneOr(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + // We know result is unknown, so we have at least one unknown arg + // and if one side is a known value, we know we can ignore it. + if v, exists := p.maybeValue(call.Args()[0].ID()); exists { + if v == types.True { + return p.maybeCreateLiteral(node.ID(), types.True) + } + return call.Args()[1], true + } + if v, exists := p.maybeValue(call.Args()[1].ID()); exists { + if v == types.True { + return p.maybeCreateLiteral(node.ID(), types.True) + } + return call.Args()[0], true + } + return nil, false +} + +func (p *astPruner) maybePruneAnd(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + // We know result is unknown, so we have at least one unknown arg + // and if one side is a known value, we know we can ignore it. 
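+	// Example (illustrative, not part of the vendored source): with `x` unknown,
+	// `false && x` prunes below to the literal false, while `true && x` prunes to
+	// the residual subexpression `x`.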
+ if v, exists := p.maybeValue(call.Args()[0].ID()); exists { + if v == types.False { + return p.maybeCreateLiteral(node.ID(), types.False) + } + return call.Args()[1], true + } + if v, exists := p.maybeValue(call.Args()[1].ID()); exists { + if v == types.False { + return p.maybeCreateLiteral(node.ID(), types.False) + } + return call.Args()[0], true + } + return nil, false +} + +func (p *astPruner) maybePruneConditional(node ast.Expr) (ast.Expr, bool) { + call := node.AsCall() + cond, exists := p.maybeValue(call.Args()[0].ID()) + if !exists { + return nil, false + } + if cond.Value().(bool) { + return call.Args()[1], true + } + return call.Args()[2], true +} + +func (p *astPruner) maybePruneFunction(node ast.Expr) (ast.Expr, bool) { + if _, exists := p.value(node.ID()); !exists { + return nil, false + } + call := node.AsCall() + if call.FunctionName() == operators.LogicalOr { + return p.maybePruneOr(node) + } + if call.FunctionName() == operators.LogicalAnd { + return p.maybePruneAnd(node) + } + if call.FunctionName() == operators.Conditional { + return p.maybePruneConditional(node) + } + if call.FunctionName() == operators.In { + return p.maybePruneIn(node) + } + if call.FunctionName() == operators.LogicalNot { + return p.maybePruneLogicalNot(node) + } + return nil, false +} + +func (p *astPruner) maybePrune(node ast.Expr) (ast.Expr, bool) { + return p.prune(node) +} + +func (p *astPruner) prune(node ast.Expr) (ast.Expr, bool) { + if node == nil { + return node, false + } + val, valueExists := p.maybeValue(node.ID()) + if valueExists { + if newNode, ok := p.maybeCreateLiteral(node.ID(), val); ok { + delete(p.macroCalls, node.ID()) + return newNode, true + } + } + if macro, found := p.macroCalls[node.ID()]; found { + // Ensure that intermediate values for the comprehension are cleared during pruning + if node.Kind() == ast.ComprehensionKind { + compre := node.AsComprehension() + visit(macro, clearIterVarVisitor(compre.IterVar(), p.state)) + } + // prune the expression in terms of the macro call instead of the expanded form. + if newMacro, pruned := p.prune(macro); pruned { + p.macroCalls[node.ID()] = newMacro + } + } + + // We have either an unknown/error value, or something we don't want to + // transform, or expression was not evaluated. If possible, drill down + // more. + switch node.Kind() { + case ast.SelectKind: + sel := node.AsSelect() + if operand, isPruned := p.maybePrune(sel.Operand()); isPruned { + if sel.IsTestOnly() { + return p.NewPresenceTest(node.ID(), operand, sel.FieldName()), true + } + return p.NewSelect(node.ID(), operand, sel.FieldName()), true + } + case ast.CallKind: + argsPruned := false + call := node.AsCall() + args := call.Args() + newArgs := make([]ast.Expr, len(args)) + for i, a := range args { + newArgs[i] = a + if arg, isPruned := p.maybePrune(a); isPruned { + argsPruned = true + newArgs[i] = arg + } + } + if !call.IsMemberFunction() { + newCall := p.NewCall(node.ID(), call.FunctionName(), newArgs...) + if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned { + return prunedCall, true + } + return newCall, argsPruned + } + newTarget := call.Target() + targetPruned := false + if prunedTarget, isPruned := p.maybePrune(call.Target()); isPruned { + targetPruned = true + newTarget = prunedTarget + } + newCall := p.NewMemberCall(node.ID(), call.FunctionName(), newTarget, newArgs...) 
+		if prunedCall, isPruned := p.maybePruneFunction(newCall); isPruned {
+			return prunedCall, true
+		}
+		return newCall, targetPruned || argsPruned
+	case ast.ListKind:
+		l := node.AsList()
+		elems := l.Elements()
+		optIndices := l.OptionalIndices()
+		optIndexMap := map[int32]bool{}
+		for _, i := range optIndices {
+			optIndexMap[i] = true
+		}
+		newOptIndexMap := make(map[int32]bool, len(optIndexMap))
+		newElems := make([]ast.Expr, 0, len(elems))
+		var listPruned bool
+		prunedIdx := 0
+		for i, elem := range elems {
+			_, isOpt := optIndexMap[int32(i)]
+			if isOpt {
+				newElem, pruned := p.maybePruneOptional(elem)
+				if pruned {
+					listPruned = true
+					if newElem != nil {
+						newElems = append(newElems, newElem)
+						prunedIdx++
+					}
+					continue
+				}
+				newOptIndexMap[int32(prunedIdx)] = true
+			}
+			if newElem, prunedElem := p.maybePrune(elem); prunedElem {
+				newElems = append(newElems, newElem)
+				listPruned = true
+			} else {
+				newElems = append(newElems, elem)
+			}
+			prunedIdx++
+		}
+		optIndices = make([]int32, len(newOptIndexMap))
+		idx := 0
+		for i := range newOptIndexMap {
+			optIndices[idx] = i
+			idx++
+		}
+		if listPruned {
+			return p.NewList(node.ID(), newElems, optIndices), true
+		}
+	case ast.MapKind:
+		var mapPruned bool
+		m := node.AsMap()
+		entries := m.Entries()
+		newEntries := make([]ast.EntryExpr, len(entries))
+		for i, entry := range entries {
+			newEntries[i] = entry
+			e := entry.AsMapEntry()
+			newKey, keyPruned := p.maybePrune(e.Key())
+			newValue, valuePruned := p.maybePrune(e.Value())
+			if !keyPruned && !valuePruned {
+				continue
+			}
+			mapPruned = true
+			newEntry := p.NewMapEntry(entry.ID(), newKey, newValue, e.IsOptional())
+			newEntries[i] = newEntry
+		}
+		if mapPruned {
+			return p.NewMap(node.ID(), newEntries), true
+		}
+	case ast.StructKind:
+		var structPruned bool
+		obj := node.AsStruct()
+		fields := obj.Fields()
+		newFields := make([]ast.EntryExpr, len(fields))
+		for i, field := range fields {
+			newFields[i] = field
+			f := field.AsStructField()
+			newValue, prunedValue := p.maybePrune(f.Value())
+			if !prunedValue {
+				continue
+			}
+			structPruned = true
+			newEntry := p.NewStructField(field.ID(), f.Name(), newValue, f.IsOptional())
+			newFields[i] = newEntry
+		}
+		if structPruned {
+			return p.NewStruct(node.ID(), obj.TypeName(), newFields), true
+		}
+	case ast.ComprehensionKind:
+		compre := node.AsComprehension()
+		// Only the range of the comprehension is pruned since the state tracking only records
+		// the last iteration of the comprehension and not each step in the evaluation, which
+		// means that any residuals computed in between might be inaccurate.
+		if newRange, pruned := p.maybePrune(compre.IterRange()); pruned {
+			return p.NewComprehension(
+				node.ID(),
+				newRange,
+				compre.IterVar(),
+				compre.AccuVar(),
+				compre.AccuInit(),
+				compre.LoopCondition(),
+				compre.LoopStep(),
+				compre.Result(),
+			), true
+		}
+	}
+	return node, false
+}
+
+func (p *astPruner) value(id int64) (ref.Val, bool) {
+	val, found := p.state.Value(id)
+	return val, (found && val != nil)
+}
+
+func (p *astPruner) maybeValue(id int64) (ref.Val, bool) {
+	val, found := p.value(id)
+	if !found || types.IsUnknownOrError(val) {
+		return nil, false
+	}
+	return val, true
+}
+
+func (p *astPruner) nextID() int64 {
+	next := p.nextExprID
+	p.nextExprID++
+	return next
+}
+
+type astVisitor struct {
+	// visitExpr is called on every expr node, including those within a map/struct entry.
+	visitExpr func(expr ast.Expr)
+	// visitEntry is called before entering the key, value of a map/struct entry.
+	visitEntry func(entry ast.EntryExpr)
+}
+
+func getMaxID(expr ast.Expr) int64 {
+	maxID := int64(1)
+	visit(expr, maxIDVisitor(&maxID))
+	return maxID
+}
+
+func clearIterVarVisitor(varName string, state EvalState) astVisitor {
+	return astVisitor{
+		visitExpr: func(e ast.Expr) {
+			if e.Kind() == ast.IdentKind && e.AsIdent() == varName {
+				state.SetValue(e.ID(), nil)
+			}
+		},
+	}
+}
+
+func maxIDVisitor(maxID *int64) astVisitor {
+	return astVisitor{
+		visitExpr: func(e ast.Expr) {
+			if e.ID() >= *maxID {
+				*maxID = e.ID() + 1
+			}
+		},
+		visitEntry: func(e ast.EntryExpr) {
+			if e.ID() >= *maxID {
+				*maxID = e.ID() + 1
+			}
+		},
+	}
+}
+
+func visit(expr ast.Expr, visitor astVisitor) {
+	exprs := []ast.Expr{expr}
+	for len(exprs) != 0 {
+		e := exprs[0]
+		if visitor.visitExpr != nil {
+			visitor.visitExpr(e)
+		}
+		exprs = exprs[1:]
+		switch e.Kind() {
+		case ast.SelectKind:
+			exprs = append(exprs, e.AsSelect().Operand())
+		case ast.CallKind:
+			call := e.AsCall()
+			if call.Target() != nil {
+				exprs = append(exprs, call.Target())
+			}
+			exprs = append(exprs, call.Args()...)
+		case ast.ComprehensionKind:
+			compre := e.AsComprehension()
+			exprs = append(exprs,
+				compre.IterRange(),
+				compre.AccuInit(),
+				compre.LoopCondition(),
+				compre.LoopStep(),
+				compre.Result())
+		case ast.ListKind:
+			list := e.AsList()
+			exprs = append(exprs, list.Elements()...)
+		case ast.MapKind:
+			for _, entry := range e.AsMap().Entries() {
+				e := entry.AsMapEntry()
+				if visitor.visitEntry != nil {
+					visitor.visitEntry(entry)
+				}
+				exprs = append(exprs, e.Key())
+				exprs = append(exprs, e.Value())
+			}
+		case ast.StructKind:
+			for _, entry := range e.AsStruct().Fields() {
+				f := entry.AsStructField()
+				if visitor.visitEntry != nil {
+					visitor.visitEntry(entry)
+				}
+				exprs = append(exprs, f.Value())
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/google/cel-go/interpreter/runtimecost.go b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
new file mode 100644
index 00000000..b9b307c1
--- /dev/null
+++ b/vendor/github.com/google/cel-go/interpreter/runtimecost.go
@@ -0,0 +1,316 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package interpreter
+
+import (
+	"math"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/overloads"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// WARNING: Any changes to cost calculations in this file require a corresponding change in checker/cost.go
+
+// ActualCostEstimator provides function call cost estimations at runtime.
+// CallCost returns an estimated cost for the function overload invocation with the given args, or nil if it has no
+// estimate to provide. CEL attempts to provide reasonable estimates for its standard function library, so CallCost
+// should typically not need to provide an estimate for CEL's standard functions.
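+// A minimal custom estimator sketch (illustrative; the type name customEstimator
+// and the function name "myExpensiveFn" are assumptions, not part of this package):
+//
+//	type customEstimator struct{}
+//
+//	func (customEstimator) CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64 {
+//		if function == "myExpensiveFn" {
+//			cost := uint64(100)
+//			return &cost
+//		}
+//		return nil // nil defers to CEL's default cost model
+//	}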
+type ActualCostEstimator interface {
+	CallCost(function, overloadID string, args []ref.Val, result ref.Val) *uint64
+}
+
+// CostObserver provides an observer that tracks runtime cost.
+func CostObserver(tracker *CostTracker) EvalObserver {
+	observer := func(id int64, programStep any, val ref.Val) {
+		switch t := programStep.(type) {
+		case ConstantQualifier:
+			// TODO: Push identifiers onto the stack before observing constant qualifiers that apply to them
+			// and enable the below pop. Once enabled, this case can be collapsed into the Qualifier case.
+			tracker.cost++
+		case InterpretableConst:
+			// zero cost
+		case InterpretableAttribute:
+			switch a := t.Attr().(type) {
+			case *conditionalAttribute:
+				// Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+				tracker.stack.drop(a.falsy.ID(), a.truthy.ID(), a.expr.ID())
+			default:
+				tracker.stack.drop(t.Attr().ID())
+				tracker.cost += common.SelectAndIdentCost
+			}
+			if !tracker.presenceTestHasCost {
+				if _, isTestOnly := programStep.(*evalTestOnly); isTestOnly {
+					tracker.cost -= common.SelectAndIdentCost
+				}
+			}
+		case *evalExhaustiveConditional:
+			// Ternary has no direct cost. All cost is from the conditional and the true/false branch expressions.
+			tracker.stack.drop(t.attr.falsy.ID(), t.attr.truthy.ID(), t.attr.expr.ID())
+
+		// While the field names are identical, the boolean operation eval structs do not share an interface and so
+		// must be handled individually.
+		case *evalOr:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalAnd:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalExhaustiveOr:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalExhaustiveAnd:
+			for _, term := range t.terms {
+				tracker.stack.drop(term.ID())
+			}
+		case *evalFold:
+			tracker.stack.drop(t.iterRange.ID())
+		case Qualifier:
+			tracker.cost++
+		case InterpretableCall:
+			if argVals, ok := tracker.stack.dropArgs(t.Args()); ok {
+				tracker.cost += tracker.costCall(t, argVals, val)
+			}
+		case InterpretableConstructor:
+			tracker.stack.dropArgs(t.InitVals())
+			switch t.Type() {
+			case types.ListType:
+				tracker.cost += common.ListCreateBaseCost
+			case types.MapType:
+				tracker.cost += common.MapCreateBaseCost
+			default:
+				tracker.cost += common.StructCreateBaseCost
+			}
+		}
+		tracker.stack.push(val, id)
+
+		if tracker.Limit != nil && tracker.cost > *tracker.Limit {
+			panic(EvalCancelledError{Cause: CostLimitExceeded, Message: "operation cancelled: actual cost limit exceeded"})
+		}
+	}
+	return observer
+}
+
+// CostTrackerOption configures the behavior of CostTracker objects.
+type CostTrackerOption func(*CostTracker) error
+
+// CostTrackerLimit sets the runtime limit on the evaluation cost during execution and will terminate the expression
+// evaluation if the limit is exceeded.
+func CostTrackerLimit(limit uint64) CostTrackerOption {
+	return func(tracker *CostTracker) error {
+		tracker.Limit = &limit
+		return nil
+	}
+}
+
+// PresenceTestHasCost determines whether presence testing has a cost of one or zero.
+// By default, a presence test has a cost of one.
+func PresenceTestHasCost(hasCost bool) CostTrackerOption {
+	return func(tracker *CostTracker) error {
+		tracker.presenceTestHasCost = hasCost
+		return nil
+	}
+}
+
+// NewCostTracker creates a new CostTracker with a given estimator and a set of functional CostTrackerOption values.
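+// Illustrative usage sketch (the nil estimator and the limit of 1000 are
+// assumptions for the example):
+//
+//	tracker, err := NewCostTracker(nil, CostTrackerLimit(1000))
+//	if err != nil {
+//		// handle the option error
+//	}
+//	decorator := Observe(CostObserver(tracker))
+//	// build the Interpretable with `decorator`, evaluate, then inspect
+//	// tracker.ActualCost() for the runtime cost actually incurred.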
+func NewCostTracker(estimator ActualCostEstimator, opts ...CostTrackerOption) (*CostTracker, error) { + tracker := &CostTracker{ + Estimator: estimator, + overloadTrackers: map[string]FunctionTracker{}, + presenceTestHasCost: true, + } + for _, opt := range opts { + err := opt(tracker) + if err != nil { + return nil, err + } + } + return tracker, nil +} + +// OverloadCostTracker binds an overload ID to a runtime FunctionTracker implementation. +// +// OverloadCostTracker instances augment or override ActualCostEstimator decisions, allowing for versioned and/or +// optional cost tracking changes. +func OverloadCostTracker(overloadID string, fnTracker FunctionTracker) CostTrackerOption { + return func(tracker *CostTracker) error { + tracker.overloadTrackers[overloadID] = fnTracker + return nil + } +} + +// FunctionTracker computes the actual cost of evaluating the functions with the given arguments and result. +type FunctionTracker func(args []ref.Val, result ref.Val) *uint64 + +// CostTracker represents the information needed for tracking runtime cost. +type CostTracker struct { + Estimator ActualCostEstimator + overloadTrackers map[string]FunctionTracker + Limit *uint64 + presenceTestHasCost bool + + cost uint64 + stack refValStack +} + +// ActualCost returns the runtime cost +func (c *CostTracker) ActualCost() uint64 { + return c.cost +} + +func (c *CostTracker) costCall(call InterpretableCall, args []ref.Val, result ref.Val) uint64 { + var cost uint64 + if len(c.overloadTrackers) != 0 { + if tracker, found := c.overloadTrackers[call.OverloadID()]; found { + callCost := tracker(args, result) + if callCost != nil { + cost += *callCost + return cost + } + } + } + if c.Estimator != nil { + callCost := c.Estimator.CallCost(call.Function(), call.OverloadID(), args, result) + if callCost != nil { + cost += *callCost + return cost + } + } + // if user didn't specify, the default way of calculating runtime cost would be used. + // if user has their own implementation of ActualCostEstimator, make sure to cover the mapping between overloadId and cost calculation + switch call.OverloadID() { + // O(n) functions + case overloads.StartsWithString, overloads.EndsWithString, overloads.StringToBytes, overloads.BytesToString, overloads.ExtQuoteString, overloads.ExtFormatString: + cost += uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor)) + case overloads.InList: + // If a list is composed entirely of constant values this is O(1), but we don't account for that here. + // We just assume all list containment checks are O(n). + cost += c.actualSize(args[1]) + // O(min(m, n)) functions + case overloads.LessString, overloads.GreaterString, overloads.LessEqualsString, overloads.GreaterEqualsString, + overloads.LessBytes, overloads.GreaterBytes, overloads.LessEqualsBytes, overloads.GreaterEqualsBytes, + overloads.Equals, overloads.NotEquals: + // When we check the equality of 2 scalar values (e.g. 2 integers, 2 floating-point numbers, 2 booleans etc.), + // the CostTracker.actualSize() function by definition returns 1 for each operand, resulting in an overall cost + // of 1. + lhsSize := c.actualSize(args[0]) + rhsSize := c.actualSize(args[1]) + minSize := lhsSize + if rhsSize < minSize { + minSize = rhsSize + } + cost += uint64(math.Ceil(float64(minSize) * common.StringTraversalCostFactor)) + // O(m+n) functions + case overloads.AddString, overloads.AddBytes: + // In the worst case scenario, we would need to reallocate a new backing store and copy both operands over. 
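+		// Example (illustrative, not part of the vendored source): concatenating a
+		// 10-byte and a 6-byte string is charged ceil((10+6) * common.StringTraversalCostFactor)
+		// cost units by the line below.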
+		cost += uint64(math.Ceil(float64(c.actualSize(args[0])+c.actualSize(args[1])) * common.StringTraversalCostFactor))
+	// O(nm) functions
+	case overloads.MatchesString:
+		// https://swtch.com/~rsc/regexp/regexp1.html applies to the RE2 implementation supported by CEL.
+		// Add one to the string length for purposes of cost calculation to prevent the product of the string and
+		// regex from being 0 in the case where the string is empty but the regex is still expensive.
+		strCost := uint64(math.Ceil((1.0 + float64(c.actualSize(args[0]))) * common.StringTraversalCostFactor))
+		// We don't know how many expressions are in the regex, just the string length (a huge
+		// improvement here would be to somehow get a count of the number of expressions in the regex or
+		// how many states are in the regex state machine and use that to measure regex cost).
+		// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+		// in length.
+		regexCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.RegexStringLengthCostFactor))
+		cost += strCost * regexCost
+	case overloads.ContainsString:
+		strCost := uint64(math.Ceil(float64(c.actualSize(args[0])) * common.StringTraversalCostFactor))
+		substrCost := uint64(math.Ceil(float64(c.actualSize(args[1])) * common.StringTraversalCostFactor))
+		cost += strCost * substrCost
+
+	default:
+		// The following operations are assumed to have O(1) complexity.
+		// - AddList due to the implementation. Index lookup can be O(c), with c the
+		//   number of concatenated lists, but we don't track that in cost calculations.
+		// - Conversions, since none perform a traversal of a type of unbound length.
+		// - Computing the size of strings, byte sequences, lists and maps.
+		// - Logical operations and all operators on fixed-width scalars (comparisons, equality).
+		// - Any functions that don't have a declared cost either here or in a provided ActualCostEstimator.
+		cost++
+
+	}
+	return cost
+}
+
+// actualSize returns the size of the value.
+func (c *CostTracker) actualSize(value ref.Val) uint64 {
+	if sz, ok := value.(traits.Sizer); ok {
+		return uint64(sz.Size().(types.Int))
+	}
+	return 1
+}
+
+type stackVal struct {
+	Val ref.Val
+	ID  int64
+}
+
+// refValStack keeps track of values of the stack for cost calculation purposes.
+type refValStack []stackVal
+
+func (s *refValStack) push(val ref.Val, id int64) {
+	value := stackVal{Val: val, ID: id}
+	*s = append(*s, value)
+}
+
+// TODO: Allowing drop and dropArgs to remove stack items above the IDs they are provided is a workaround. drop and dropArgs
+// should find and remove only the stack items matching the provided IDs once all attributes are properly pushed and popped from the stack.
+
+// drop searches the stack for each ID and removes the ID and all stack items above it.
+// If none of the IDs are found, the stack is not modified.
+// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented), so it's
+// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped.
+func (s *refValStack) drop(ids ...int64) {
+	for _, id := range ids {
+		for idx := len(*s) - 1; idx >= 0; idx-- {
+			if (*s)[idx].ID == id {
+				*s = (*s)[:idx]
+				break
+			}
+		}
+	}
+}
+
+// dropArgs searches the stack for all the args by their IDs, accumulates their associated ref.Vals and drops any
+// stack items above any of the arg IDs. If any of the IDs are not found in the stack, false is returned.
+// Args are assumed to be found in the stack in reverse order, i.e.
the last arg is expected to be found highest in +// the stack. +// WARNING: It is possible for multiple expressions with the same ID to exist (due to how macros are implemented) so it's +// possible that a dropped ID will remain on the stack. They should be removed when IDs on the stack are popped. +func (s *refValStack) dropArgs(args []Interpretable) ([]ref.Val, bool) { + result := make([]ref.Val, len(args)) +argloop: + for nIdx := len(args) - 1; nIdx >= 0; nIdx-- { + for idx := len(*s) - 1; idx >= 0; idx-- { + if (*s)[idx].ID == args[nIdx].ID() { + el := (*s)[idx] + *s = (*s)[:idx] + result[nIdx] = el.Val + continue argloop + } + } + return nil, false + } + return result, true +} diff --git a/vendor/github.com/google/cel-go/parser/BUILD.bazel b/vendor/github.com/google/cel-go/parser/BUILD.bazel new file mode 100644 index 00000000..97bc9bd4 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/BUILD.bazel @@ -0,0 +1,58 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package( + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "errors.go", + "helper.go", + "input.go", + "macro.go", + "options.go", + "parser.go", + "unescape.go", + "unparser.go", + ], + importpath = "github.com/google/cel-go/parser", + visibility = ["//visibility:public"], + deps = [ + "//common:go_default_library", + "//common/ast:go_default_library", + "//common/operators:go_default_library", + "//common/runes:go_default_library", + "//common/types:go_default_library", + "//common/types/ref:go_default_library", + "//parser/gen:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", + "@org_golang_google_genproto_googleapis_api//expr/v1alpha1:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//types/known/structpb:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "helper_test.go", + "parser_test.go", + "unescape_test.go", + "unparser_test.go", + ], + embed = [ + ":go_default_library", + ], + deps = [ + "//common/ast:go_default_library", + "//common/debug:go_default_library", + "//common/types:go_default_library", + "//parser/gen:go_default_library", + "//test:go_default_library", + "@com_github_antlr4_go_antlr_v4//:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", + "@org_golang_google_protobuf//testing/protocmp:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/parser/errors.go b/vendor/github.com/google/cel-go/parser/errors.go new file mode 100644 index 00000000..93ae7a3a --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/errors.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + + "github.com/google/cel-go/common" +) + +// parseErrors is a specialization of Errors. 
+type parseErrors struct { + errs *common.Errors +} + +// errorCount indicates the number of errors reported. +func (e *parseErrors) errorCount() int { + return len(e.errs.GetErrors()) +} + +func (e *parseErrors) internalError(message string) { + e.errs.ReportErrorAtID(0, common.NoLocation, message) +} + +func (e *parseErrors) syntaxError(l common.Location, message string) { + e.errs.ReportErrorAtID(0, l, fmt.Sprintf("Syntax error: %s", message)) +} + +func (e *parseErrors) reportErrorAtID(id int64, l common.Location, message string, args ...any) { + e.errs.ReportErrorAtID(id, l, message, args...) +} diff --git a/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel new file mode 100644 index 00000000..e7043348 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +package( + default_visibility = ["//parser:__subpackages__"], + licenses = ["notice"], # Apache 2.0 +) + +go_library( + name = "go_default_library", + srcs = [ + "cel_base_listener.go", + "cel_base_visitor.go", + "cel_lexer.go", + "cel_listener.go", + "cel_parser.go", + "cel_visitor.go", + ], + data = [ + "CEL.tokens", + "CELLexer.tokens", + ], + importpath = "github.com/google/cel-go/parser/gen", + deps = [ + "@com_github_antlr4_go_antlr_v4//:go_default_library", + ], +) diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.g4 b/vendor/github.com/google/cel-go/parser/gen/CEL.g4 new file mode 100644 index 00000000..b011da80 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/CEL.g4 @@ -0,0 +1,200 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +grammar CEL; + +// Grammar Rules +// ============= + +start + : e=expr EOF + ; + +expr + : e=conditionalOr (op='?' e1=conditionalOr ':' e2=expr)? + ; + +conditionalOr + : e=conditionalAnd (ops+='||' e1+=conditionalAnd)* + ; + +conditionalAnd + : e=relation (ops+='&&' e1+=relation)* + ; + +relation + : calc + | relation op=('<'|'<='|'>='|'>'|'=='|'!='|'in') relation + ; + +calc + : unary + | calc op=('*'|'/'|'%') calc + | calc op=('+'|'-') calc + ; + +unary + : member # MemberExpr + | (ops+='!')+ member # LogicalNot + | (ops+='-')+ member # Negate + ; + +member + : primary # PrimaryExpr + | member op='.' (opt='?')? id=IDENTIFIER # Select + | member op='.' id=IDENTIFIER open='(' args=exprList? ')' # MemberCall + | member op='[' (opt='?')? index=expr ']' # Index + ; + +primary + : leadingDot='.'? id=IDENTIFIER (op='(' args=exprList? ')')? # IdentOrGlobalCall + | '(' e=expr ')' # Nested + | op='[' elems=listInit? ','? ']' # CreateList + | op='{' entries=mapInitializerList? ','? '}' # CreateStruct + | leadingDot='.'? ids+=IDENTIFIER (ops+='.' ids+=IDENTIFIER)* + op='{' entries=fieldInitializerList? ','? 
'}' # CreateMessage + | literal # ConstantLiteral + ; + +exprList + : e+=expr (',' e+=expr)* + ; + +listInit + : elems+=optExpr (',' elems+=optExpr)* + ; + +fieldInitializerList + : fields+=optField cols+=':' values+=expr (',' fields+=optField cols+=':' values+=expr)* + ; + +optField + : (opt='?')? IDENTIFIER + ; + +mapInitializerList + : keys+=optExpr cols+=':' values+=expr (',' keys+=optExpr cols+=':' values+=expr)* + ; + +optExpr + : (opt='?')? e=expr + ; + +literal + : sign=MINUS? tok=NUM_INT # Int + | tok=NUM_UINT # Uint + | sign=MINUS? tok=NUM_FLOAT # Double + | tok=STRING # String + | tok=BYTES # Bytes + | tok=CEL_TRUE # BoolTrue + | tok=CEL_FALSE # BoolFalse + | tok=NUL # Null + ; + +// Lexer Rules +// =========== + +EQUALS : '=='; +NOT_EQUALS : '!='; +IN: 'in'; +LESS : '<'; +LESS_EQUALS : '<='; +GREATER_EQUALS : '>='; +GREATER : '>'; +LOGICAL_AND : '&&'; +LOGICAL_OR : '||'; + +LBRACKET : '['; +RPRACKET : ']'; +LBRACE : '{'; +RBRACE : '}'; +LPAREN : '('; +RPAREN : ')'; +DOT : '.'; +COMMA : ','; +MINUS : '-'; +EXCLAM : '!'; +QUESTIONMARK : '?'; +COLON : ':'; +PLUS : '+'; +STAR : '*'; +SLASH : '/'; +PERCENT : '%'; +CEL_TRUE : 'true'; +CEL_FALSE : 'false'; +NUL : 'null'; + +fragment BACKSLASH : '\\'; +fragment LETTER : 'A'..'Z' | 'a'..'z' ; +fragment DIGIT : '0'..'9' ; +fragment EXPONENT : ('e' | 'E') ( '+' | '-' )? DIGIT+ ; +fragment HEXDIGIT : ('0'..'9'|'a'..'f'|'A'..'F') ; +fragment RAW : 'r' | 'R'; + +fragment ESC_SEQ + : ESC_CHAR_SEQ + | ESC_BYTE_SEQ + | ESC_UNI_SEQ + | ESC_OCT_SEQ + ; + +fragment ESC_CHAR_SEQ + : BACKSLASH ('a'|'b'|'f'|'n'|'r'|'t'|'v'|'"'|'\''|'\\'|'?'|'`') + ; + +fragment ESC_OCT_SEQ + : BACKSLASH ('0'..'3') ('0'..'7') ('0'..'7') + ; + +fragment ESC_BYTE_SEQ + : BACKSLASH ( 'x' | 'X' ) HEXDIGIT HEXDIGIT + ; + +fragment ESC_UNI_SEQ + : BACKSLASH 'u' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT + | BACKSLASH 'U' HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT HEXDIGIT + ; + +WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ -> channel(HIDDEN) ; +COMMENT : '//' (~'\n')* -> channel(HIDDEN) ; + +NUM_FLOAT + : ( DIGIT+ ('.' DIGIT+) EXPONENT? + | DIGIT+ EXPONENT + | '.' DIGIT+ EXPONENT? + ) + ; + +NUM_INT + : ( DIGIT+ | '0x' HEXDIGIT+ ); + +NUM_UINT + : DIGIT+ ( 'u' | 'U' ) + | '0x' HEXDIGIT+ ( 'u' | 'U' ) + ; + +STRING + : '"' (ESC_SEQ | ~('\\'|'"'|'\n'|'\r'))* '"' + | '\'' (ESC_SEQ | ~('\\'|'\''|'\n'|'\r'))* '\'' + | '"""' (ESC_SEQ | ~('\\'))*? '"""' + | '\'\'\'' (ESC_SEQ | ~('\\'))*? '\'\'\'' + | RAW '"' ~('"'|'\n'|'\r')* '"' + | RAW '\'' ~('\''|'\n'|'\r')* '\'' + | RAW '"""' .*? '"""' + | RAW '\'\'\'' .*? '\'\'\'' + ; + +BYTES : ('b' | 'B') STRING; + +IDENTIFIER : (LETTER | '_') ( LETTER | DIGIT | '_')*; diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.interp b/vendor/github.com/google/cel-go/parser/gen/CEL.interp new file mode 100644 index 00000000..75b8bb3e --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/CEL.interp @@ -0,0 +1,99 @@ +token literal names: +null +'==' +'!=' +'in' +'<' +'<=' +'>=' +'>' +'&&' +'||' +'[' +']' +'{' +'}' +'(' +')' +'.' +',' +'-' +'!' +'?' 
+':' +'+' +'*' +'/' +'%' +'true' +'false' +'null' +null +null +null +null +null +null +null +null + +token symbolic names: +null +EQUALS +NOT_EQUALS +IN +LESS +LESS_EQUALS +GREATER_EQUALS +GREATER +LOGICAL_AND +LOGICAL_OR +LBRACKET +RPRACKET +LBRACE +RBRACE +LPAREN +RPAREN +DOT +COMMA +MINUS +EXCLAM +QUESTIONMARK +COLON +PLUS +STAR +SLASH +PERCENT +CEL_TRUE +CEL_FALSE +NUL +WHITESPACE +COMMENT +NUM_FLOAT +NUM_INT +NUM_UINT +STRING +BYTES +IDENTIFIER + +rule names: +start +expr +conditionalOr +conditionalAnd +relation +calc +unary +member +primary +exprList +listInit +fieldInitializerList +optField +mapInitializerList +optExpr +literal + + +atn: +[4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 
4, -1, 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, 0, 153, 155, 3, 26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, 
14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, 240, 248] \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/parser/gen/CEL.tokens b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens new file mode 100644 index 00000000..b305bdad --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/CEL.tokens @@ -0,0 +1,64 @@ +EQUALS=1 +NOT_EQUALS=2 +IN=3 +LESS=4 +LESS_EQUALS=5 +GREATER_EQUALS=6 +GREATER=7 +LOGICAL_AND=8 +LOGICAL_OR=9 +LBRACKET=10 +RPRACKET=11 +LBRACE=12 +RBRACE=13 +LPAREN=14 +RPAREN=15 +DOT=16 +COMMA=17 +MINUS=18 +EXCLAM=19 +QUESTIONMARK=20 +COLON=21 +PLUS=22 +STAR=23 +SLASH=24 +PERCENT=25 +CEL_TRUE=26 +CEL_FALSE=27 +NUL=28 +WHITESPACE=29 +COMMENT=30 +NUM_FLOAT=31 +NUM_INT=32 +NUM_UINT=33 +STRING=34 +BYTES=35 +IDENTIFIER=36 +'=='=1 +'!='=2 +'in'=3 +'<'=4 +'<='=5 +'>='=6 +'>'=7 +'&&'=8 +'||'=9 +'['=10 +']'=11 +'{'=12 +'}'=13 +'('=14 +')'=15 +'.'=16 +','=17 +'-'=18 +'!'=19 +'?'=20 +':'=21 +'+'=22 +'*'=23 +'/'=24 +'%'=25 +'true'=26 +'false'=27 +'null'=28 diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp new file mode 100644 index 00000000..26e7f471 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.interp @@ -0,0 +1,136 @@ +token literal names: +null +'==' +'!=' +'in' +'<' +'<=' +'>=' +'>' +'&&' +'||' +'[' +']' +'{' +'}' +'(' +')' +'.' +',' +'-' +'!' +'?' 
+':' +'+' +'*' +'/' +'%' +'true' +'false' +'null' +null +null +null +null +null +null +null +null + +token symbolic names: +null +EQUALS +NOT_EQUALS +IN +LESS +LESS_EQUALS +GREATER_EQUALS +GREATER +LOGICAL_AND +LOGICAL_OR +LBRACKET +RPRACKET +LBRACE +RBRACE +LPAREN +RPAREN +DOT +COMMA +MINUS +EXCLAM +QUESTIONMARK +COLON +PLUS +STAR +SLASH +PERCENT +CEL_TRUE +CEL_FALSE +NUL +WHITESPACE +COMMENT +NUM_FLOAT +NUM_INT +NUM_UINT +STRING +BYTES +IDENTIFIER + +rule names: +EQUALS +NOT_EQUALS +IN +LESS +LESS_EQUALS +GREATER_EQUALS +GREATER +LOGICAL_AND +LOGICAL_OR +LBRACKET +RPRACKET +LBRACE +RBRACE +LPAREN +RPAREN +DOT +COMMA +MINUS +EXCLAM +QUESTIONMARK +COLON +PLUS +STAR +SLASH +PERCENT +CEL_TRUE +CEL_FALSE +NUL +BACKSLASH +LETTER +DIGIT +EXPONENT +HEXDIGIT +RAW +ESC_SEQ +ESC_CHAR_SEQ +ESC_OCT_SEQ +ESC_BYTE_SEQ +ESC_UNI_SEQ +WHITESPACE +COMMENT +NUM_FLOAT +NUM_INT +NUM_UINT +STRING +BYTES +IDENTIFIER + +channel names: +DEFAULT_TOKEN_CHANNEL +HIDDEN + +mode names: +DEFAULT_MODE + +atn: +[4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, 41, 2, 42, 7, 42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, 44, 1, 
44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 13, 39, 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, 
12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, 48, 57, 0, 173, 62, 1, 0, 0, 0, 174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, 1, 0, 0, 
0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, 1, 0, 0, 0, 306, 307, 7, 9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, 5, 39, 0, 0, 394, 
395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, 413, 418, 420, 1, 0, 1, 0] \ No newline at end of file diff --git a/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens new file mode 100644 index 00000000..b305bdad --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens @@ -0,0 +1,64 @@ +EQUALS=1 +NOT_EQUALS=2 +IN=3 +LESS=4 +LESS_EQUALS=5 +GREATER_EQUALS=6 +GREATER=7 +LOGICAL_AND=8 +LOGICAL_OR=9 +LBRACKET=10 +RPRACKET=11 +LBRACE=12 +RBRACE=13 +LPAREN=14 +RPAREN=15 +DOT=16 +COMMA=17 +MINUS=18 +EXCLAM=19 +QUESTIONMARK=20 +COLON=21 +PLUS=22 +STAR=23 +SLASH=24 +PERCENT=25 +CEL_TRUE=26 +CEL_FALSE=27 +NUL=28 +WHITESPACE=29 +COMMENT=30 +NUM_FLOAT=31 +NUM_INT=32 +NUM_UINT=33 +STRING=34 +BYTES=35 +IDENTIFIER=36 +'=='=1 +'!='=2 +'in'=3 +'<'=4 +'<='=5 +'>='=6 +'>'=7 +'&&'=8 +'||'=9 +'['=10 +']'=11 +'{'=12 +'}'=13 +'('=14 +')'=15 +'.'=16 +','=17 +'-'=18 +'!'=19 +'?'=20 +':'=21 +'+'=22 +'*'=23 +'/'=24 +'%'=25 +'true'=26 +'false'=27 +'null'=28 diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go new file mode 100644 index 00000000..c49d0386 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go @@ -0,0 +1,219 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. + +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + +// BaseCELListener is a complete listener for a parse tree produced by CELParser. +type BaseCELListener struct{} + +var _ CELListener = &BaseCELListener{} + +// VisitTerminal is called when a terminal node is visited. +func (s *BaseCELListener) VisitTerminal(node antlr.TerminalNode) {} + +// VisitErrorNode is called when an error node is visited. +func (s *BaseCELListener) VisitErrorNode(node antlr.ErrorNode) {} + +// EnterEveryRule is called when any rule is entered. +func (s *BaseCELListener) EnterEveryRule(ctx antlr.ParserRuleContext) {} + +// ExitEveryRule is called when any rule is exited. +func (s *BaseCELListener) ExitEveryRule(ctx antlr.ParserRuleContext) {} + +// EnterStart is called when production start is entered. +func (s *BaseCELListener) EnterStart(ctx *StartContext) {} + +// ExitStart is called when production start is exited. +func (s *BaseCELListener) ExitStart(ctx *StartContext) {} + +// EnterExpr is called when production expr is entered. 
+func (s *BaseCELListener) EnterExpr(ctx *ExprContext) {} + +// ExitExpr is called when production expr is exited. +func (s *BaseCELListener) ExitExpr(ctx *ExprContext) {} + +// EnterConditionalOr is called when production conditionalOr is entered. +func (s *BaseCELListener) EnterConditionalOr(ctx *ConditionalOrContext) {} + +// ExitConditionalOr is called when production conditionalOr is exited. +func (s *BaseCELListener) ExitConditionalOr(ctx *ConditionalOrContext) {} + +// EnterConditionalAnd is called when production conditionalAnd is entered. +func (s *BaseCELListener) EnterConditionalAnd(ctx *ConditionalAndContext) {} + +// ExitConditionalAnd is called when production conditionalAnd is exited. +func (s *BaseCELListener) ExitConditionalAnd(ctx *ConditionalAndContext) {} + +// EnterRelation is called when production relation is entered. +func (s *BaseCELListener) EnterRelation(ctx *RelationContext) {} + +// ExitRelation is called when production relation is exited. +func (s *BaseCELListener) ExitRelation(ctx *RelationContext) {} + +// EnterCalc is called when production calc is entered. +func (s *BaseCELListener) EnterCalc(ctx *CalcContext) {} + +// ExitCalc is called when production calc is exited. +func (s *BaseCELListener) ExitCalc(ctx *CalcContext) {} + +// EnterMemberExpr is called when production MemberExpr is entered. +func (s *BaseCELListener) EnterMemberExpr(ctx *MemberExprContext) {} + +// ExitMemberExpr is called when production MemberExpr is exited. +func (s *BaseCELListener) ExitMemberExpr(ctx *MemberExprContext) {} + +// EnterLogicalNot is called when production LogicalNot is entered. +func (s *BaseCELListener) EnterLogicalNot(ctx *LogicalNotContext) {} + +// ExitLogicalNot is called when production LogicalNot is exited. +func (s *BaseCELListener) ExitLogicalNot(ctx *LogicalNotContext) {} + +// EnterNegate is called when production Negate is entered. +func (s *BaseCELListener) EnterNegate(ctx *NegateContext) {} + +// ExitNegate is called when production Negate is exited. +func (s *BaseCELListener) ExitNegate(ctx *NegateContext) {} + +// EnterMemberCall is called when production MemberCall is entered. +func (s *BaseCELListener) EnterMemberCall(ctx *MemberCallContext) {} + +// ExitMemberCall is called when production MemberCall is exited. +func (s *BaseCELListener) ExitMemberCall(ctx *MemberCallContext) {} + +// EnterSelect is called when production Select is entered. +func (s *BaseCELListener) EnterSelect(ctx *SelectContext) {} + +// ExitSelect is called when production Select is exited. +func (s *BaseCELListener) ExitSelect(ctx *SelectContext) {} + +// EnterPrimaryExpr is called when production PrimaryExpr is entered. +func (s *BaseCELListener) EnterPrimaryExpr(ctx *PrimaryExprContext) {} + +// ExitPrimaryExpr is called when production PrimaryExpr is exited. +func (s *BaseCELListener) ExitPrimaryExpr(ctx *PrimaryExprContext) {} + +// EnterIndex is called when production Index is entered. +func (s *BaseCELListener) EnterIndex(ctx *IndexContext) {} + +// ExitIndex is called when production Index is exited. +func (s *BaseCELListener) ExitIndex(ctx *IndexContext) {} + +// EnterIdentOrGlobalCall is called when production IdentOrGlobalCall is entered. +func (s *BaseCELListener) EnterIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} + +// ExitIdentOrGlobalCall is called when production IdentOrGlobalCall is exited. +func (s *BaseCELListener) ExitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) {} + +// EnterNested is called when production Nested is entered. 
+func (s *BaseCELListener) EnterNested(ctx *NestedContext) {} + +// ExitNested is called when production Nested is exited. +func (s *BaseCELListener) ExitNested(ctx *NestedContext) {} + +// EnterCreateList is called when production CreateList is entered. +func (s *BaseCELListener) EnterCreateList(ctx *CreateListContext) {} + +// ExitCreateList is called when production CreateList is exited. +func (s *BaseCELListener) ExitCreateList(ctx *CreateListContext) {} + +// EnterCreateStruct is called when production CreateStruct is entered. +func (s *BaseCELListener) EnterCreateStruct(ctx *CreateStructContext) {} + +// ExitCreateStruct is called when production CreateStruct is exited. +func (s *BaseCELListener) ExitCreateStruct(ctx *CreateStructContext) {} + +// EnterCreateMessage is called when production CreateMessage is entered. +func (s *BaseCELListener) EnterCreateMessage(ctx *CreateMessageContext) {} + +// ExitCreateMessage is called when production CreateMessage is exited. +func (s *BaseCELListener) ExitCreateMessage(ctx *CreateMessageContext) {} + +// EnterConstantLiteral is called when production ConstantLiteral is entered. +func (s *BaseCELListener) EnterConstantLiteral(ctx *ConstantLiteralContext) {} + +// ExitConstantLiteral is called when production ConstantLiteral is exited. +func (s *BaseCELListener) ExitConstantLiteral(ctx *ConstantLiteralContext) {} + +// EnterExprList is called when production exprList is entered. +func (s *BaseCELListener) EnterExprList(ctx *ExprListContext) {} + +// ExitExprList is called when production exprList is exited. +func (s *BaseCELListener) ExitExprList(ctx *ExprListContext) {} + +// EnterListInit is called when production listInit is entered. +func (s *BaseCELListener) EnterListInit(ctx *ListInitContext) {} + +// ExitListInit is called when production listInit is exited. +func (s *BaseCELListener) ExitListInit(ctx *ListInitContext) {} + +// EnterFieldInitializerList is called when production fieldInitializerList is entered. +func (s *BaseCELListener) EnterFieldInitializerList(ctx *FieldInitializerListContext) {} + +// ExitFieldInitializerList is called when production fieldInitializerList is exited. +func (s *BaseCELListener) ExitFieldInitializerList(ctx *FieldInitializerListContext) {} + +// EnterOptField is called when production optField is entered. +func (s *BaseCELListener) EnterOptField(ctx *OptFieldContext) {} + +// ExitOptField is called when production optField is exited. +func (s *BaseCELListener) ExitOptField(ctx *OptFieldContext) {} + +// EnterMapInitializerList is called when production mapInitializerList is entered. +func (s *BaseCELListener) EnterMapInitializerList(ctx *MapInitializerListContext) {} + +// ExitMapInitializerList is called when production mapInitializerList is exited. +func (s *BaseCELListener) ExitMapInitializerList(ctx *MapInitializerListContext) {} + +// EnterOptExpr is called when production optExpr is entered. +func (s *BaseCELListener) EnterOptExpr(ctx *OptExprContext) {} + +// ExitOptExpr is called when production optExpr is exited. +func (s *BaseCELListener) ExitOptExpr(ctx *OptExprContext) {} + +// EnterInt is called when production Int is entered. +func (s *BaseCELListener) EnterInt(ctx *IntContext) {} + +// ExitInt is called when production Int is exited. +func (s *BaseCELListener) ExitInt(ctx *IntContext) {} + +// EnterUint is called when production Uint is entered. +func (s *BaseCELListener) EnterUint(ctx *UintContext) {} + +// ExitUint is called when production Uint is exited. 
+func (s *BaseCELListener) ExitUint(ctx *UintContext) {} + +// EnterDouble is called when production Double is entered. +func (s *BaseCELListener) EnterDouble(ctx *DoubleContext) {} + +// ExitDouble is called when production Double is exited. +func (s *BaseCELListener) ExitDouble(ctx *DoubleContext) {} + +// EnterString is called when production String is entered. +func (s *BaseCELListener) EnterString(ctx *StringContext) {} + +// ExitString is called when production String is exited. +func (s *BaseCELListener) ExitString(ctx *StringContext) {} + +// EnterBytes is called when production Bytes is entered. +func (s *BaseCELListener) EnterBytes(ctx *BytesContext) {} + +// ExitBytes is called when production Bytes is exited. +func (s *BaseCELListener) ExitBytes(ctx *BytesContext) {} + +// EnterBoolTrue is called when production BoolTrue is entered. +func (s *BaseCELListener) EnterBoolTrue(ctx *BoolTrueContext) {} + +// ExitBoolTrue is called when production BoolTrue is exited. +func (s *BaseCELListener) ExitBoolTrue(ctx *BoolTrueContext) {} + +// EnterBoolFalse is called when production BoolFalse is entered. +func (s *BaseCELListener) EnterBoolFalse(ctx *BoolFalseContext) {} + +// ExitBoolFalse is called when production BoolFalse is exited. +func (s *BaseCELListener) ExitBoolFalse(ctx *BoolFalseContext) {} + +// EnterNull is called when production Null is entered. +func (s *BaseCELListener) EnterNull(ctx *NullContext) {} + +// ExitNull is called when production Null is exited. +func (s *BaseCELListener) ExitNull(ctx *NullContext) {} diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go new file mode 100644 index 00000000..b2c0783d --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go @@ -0,0 +1,141 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
+ +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + + +type BaseCELVisitor struct { + *antlr.BaseParseTreeVisitor +} + +func (v *BaseCELVisitor) VisitStart(ctx *StartContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitExpr(ctx *ExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitConditionalOr(ctx *ConditionalOrContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitConditionalAnd(ctx *ConditionalAndContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitRelation(ctx *RelationContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCalc(ctx *CalcContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitMemberExpr(ctx *MemberExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitLogicalNot(ctx *LogicalNotContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitNegate(ctx *NegateContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitMemberCall(ctx *MemberCallContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitSelect(ctx *SelectContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitIndex(ctx *IndexContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitNested(ctx *NestedContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCreateList(ctx *CreateListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCreateStruct(ctx *CreateStructContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitCreateMessage(ctx *CreateMessageContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitExprList(ctx *ExprListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitListInit(ctx *ListInitContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitOptField(ctx *OptFieldContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitMapInitializerList(ctx *MapInitializerListContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitOptExpr(ctx *OptExprContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitInt(ctx *IntContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitUint(ctx *UintContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitDouble(ctx *DoubleContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitString(ctx *StringContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitBytes(ctx *BytesContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) 
VisitBoolTrue(ctx *BoolTrueContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitBoolFalse(ctx *BoolFalseContext) interface{} { + return v.VisitChildren(ctx) +} + +func (v *BaseCELVisitor) VisitNull(ctx *NullContext) interface{} { + return v.VisitChildren(ctx) +} diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go new file mode 100644 index 00000000..e026cc46 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/cel_lexer.go @@ -0,0 +1,344 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. + +package gen +import ( + "fmt" + "sync" + "unicode" + "github.com/antlr4-go/antlr/v4" +) +// Suppress unused import error +var _ = fmt.Printf +var _ = sync.Once{} +var _ = unicode.IsLetter + + +type CELLexer struct { + *antlr.BaseLexer + channelNames []string + modeNames []string + // TODO: EOF string +} + +var CELLexerLexerStaticData struct { + once sync.Once + serializedATN []int32 + ChannelNames []string + ModeNames []string + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA +} + +func cellexerLexerInit() { + staticData := &CELLexerLexerStaticData + staticData.ChannelNames = []string{ + "DEFAULT_TOKEN_CHANNEL", "HIDDEN", + } + staticData.ModeNames = []string{ + "DEFAULT_MODE", + } + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "BACKSLASH", "LETTER", "DIGIT", "EXPONENT", "HEXDIGIT", "RAW", + "ESC_SEQ", "ESC_CHAR_SEQ", "ESC_OCT_SEQ", "ESC_BYTE_SEQ", "ESC_UNI_SEQ", + "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", "STRING", + "BYTES", "IDENTIFIER", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 0, 36, 423, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, + 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, + 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, + 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, + 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, + 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, + 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, + 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 2, 39, 7, 39, 2, 40, 7, 40, 2, 41, 7, + 41, 2, 42, 7, 
42, 2, 43, 7, 43, 2, 44, 7, 44, 2, 45, 7, 45, 2, 46, 7, 46, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, + 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, + 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 1, 12, 1, 12, 1, 13, 1, 13, + 1, 14, 1, 14, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, + 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, + 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, + 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 29, 1, 29, + 1, 30, 1, 30, 1, 31, 1, 31, 3, 31, 177, 8, 31, 1, 31, 4, 31, 180, 8, 31, + 11, 31, 12, 31, 181, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 34, 1, + 34, 3, 34, 192, 8, 34, 1, 35, 1, 35, 1, 35, 1, 36, 1, 36, 1, 36, 1, 36, + 1, 36, 1, 37, 1, 37, 1, 37, 1, 37, 1, 37, 1, 38, 1, 38, 1, 38, 1, 38, 1, + 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, 1, 38, + 1, 38, 1, 38, 1, 38, 3, 38, 225, 8, 38, 1, 39, 4, 39, 228, 8, 39, 11, 39, + 12, 39, 229, 1, 39, 1, 39, 1, 40, 1, 40, 1, 40, 1, 40, 5, 40, 238, 8, 40, + 10, 40, 12, 40, 241, 9, 40, 1, 40, 1, 40, 1, 41, 4, 41, 246, 8, 41, 11, + 41, 12, 41, 247, 1, 41, 1, 41, 4, 41, 252, 8, 41, 11, 41, 12, 41, 253, + 1, 41, 3, 41, 257, 8, 41, 1, 41, 4, 41, 260, 8, 41, 11, 41, 12, 41, 261, + 1, 41, 1, 41, 1, 41, 1, 41, 4, 41, 268, 8, 41, 11, 41, 12, 41, 269, 1, + 41, 3, 41, 273, 8, 41, 3, 41, 275, 8, 41, 1, 42, 4, 42, 278, 8, 42, 11, + 42, 12, 42, 279, 1, 42, 1, 42, 1, 42, 1, 42, 4, 42, 286, 8, 42, 11, 42, + 12, 42, 287, 3, 42, 290, 8, 42, 1, 43, 4, 43, 293, 8, 43, 11, 43, 12, 43, + 294, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 1, 43, 4, 43, 303, 8, 43, 11, 43, + 12, 43, 304, 1, 43, 1, 43, 3, 43, 309, 8, 43, 1, 44, 1, 44, 1, 44, 5, 44, + 314, 8, 44, 10, 44, 12, 44, 317, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, + 44, 323, 8, 44, 10, 44, 12, 44, 326, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 5, 44, 335, 8, 44, 10, 44, 12, 44, 338, 9, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 349, + 8, 44, 10, 44, 12, 44, 352, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 5, 44, 360, 8, 44, 10, 44, 12, 44, 363, 9, 44, 1, 44, 1, 44, 1, 44, + 1, 44, 1, 44, 5, 44, 370, 8, 44, 10, 44, 12, 44, 373, 9, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 5, 44, 383, 8, 44, 10, 44, + 12, 44, 386, 9, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, 44, 1, + 44, 1, 44, 1, 44, 5, 44, 398, 8, 44, 10, 44, 12, 44, 401, 9, 44, 1, 44, + 1, 44, 1, 44, 1, 44, 3, 44, 407, 8, 44, 1, 45, 1, 45, 1, 45, 1, 46, 1, + 46, 3, 46, 414, 8, 46, 1, 46, 1, 46, 1, 46, 5, 46, 419, 8, 46, 10, 46, + 12, 46, 422, 9, 46, 4, 336, 350, 384, 399, 0, 47, 1, 1, 3, 2, 5, 3, 7, + 4, 9, 5, 11, 6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, + 14, 29, 15, 31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, + 23, 47, 24, 49, 25, 51, 26, 53, 27, 55, 28, 57, 0, 59, 0, 61, 0, 63, 0, + 65, 0, 67, 0, 69, 0, 71, 0, 73, 0, 75, 0, 77, 0, 79, 29, 81, 30, 83, 31, + 85, 32, 87, 33, 89, 34, 91, 35, 93, 36, 1, 0, 16, 2, 0, 65, 90, 97, 122, + 2, 0, 69, 69, 101, 101, 2, 0, 43, 43, 45, 45, 3, 0, 48, 57, 65, 70, 97, + 102, 2, 0, 82, 82, 114, 114, 10, 0, 34, 34, 39, 39, 63, 63, 92, 92, 96, + 98, 102, 102, 110, 110, 114, 114, 116, 116, 118, 118, 2, 0, 88, 88, 120, + 120, 3, 0, 9, 10, 12, 13, 32, 32, 1, 0, 10, 10, 2, 0, 85, 85, 117, 117, + 4, 0, 10, 10, 13, 13, 34, 34, 92, 92, 4, 0, 10, 10, 13, 13, 39, 39, 92, + 92, 1, 0, 92, 92, 3, 0, 10, 10, 13, 13, 34, 34, 3, 0, 10, 10, 13, 
13, 39, + 39, 2, 0, 66, 66, 98, 98, 456, 0, 1, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, + 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9, 1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, + 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, + 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, + 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, + 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, + 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, + 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 79, 1, 0, 0, 0, 0, 81, + 1, 0, 0, 0, 0, 83, 1, 0, 0, 0, 0, 85, 1, 0, 0, 0, 0, 87, 1, 0, 0, 0, 0, + 89, 1, 0, 0, 0, 0, 91, 1, 0, 0, 0, 0, 93, 1, 0, 0, 0, 1, 95, 1, 0, 0, 0, + 3, 98, 1, 0, 0, 0, 5, 101, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 106, 1, 0, + 0, 0, 11, 109, 1, 0, 0, 0, 13, 112, 1, 0, 0, 0, 15, 114, 1, 0, 0, 0, 17, + 117, 1, 0, 0, 0, 19, 120, 1, 0, 0, 0, 21, 122, 1, 0, 0, 0, 23, 124, 1, + 0, 0, 0, 25, 126, 1, 0, 0, 0, 27, 128, 1, 0, 0, 0, 29, 130, 1, 0, 0, 0, + 31, 132, 1, 0, 0, 0, 33, 134, 1, 0, 0, 0, 35, 136, 1, 0, 0, 0, 37, 138, + 1, 0, 0, 0, 39, 140, 1, 0, 0, 0, 41, 142, 1, 0, 0, 0, 43, 144, 1, 0, 0, + 0, 45, 146, 1, 0, 0, 0, 47, 148, 1, 0, 0, 0, 49, 150, 1, 0, 0, 0, 51, 152, + 1, 0, 0, 0, 53, 157, 1, 0, 0, 0, 55, 163, 1, 0, 0, 0, 57, 168, 1, 0, 0, + 0, 59, 170, 1, 0, 0, 0, 61, 172, 1, 0, 0, 0, 63, 174, 1, 0, 0, 0, 65, 183, + 1, 0, 0, 0, 67, 185, 1, 0, 0, 0, 69, 191, 1, 0, 0, 0, 71, 193, 1, 0, 0, + 0, 73, 196, 1, 0, 0, 0, 75, 201, 1, 0, 0, 0, 77, 224, 1, 0, 0, 0, 79, 227, + 1, 0, 0, 0, 81, 233, 1, 0, 0, 0, 83, 274, 1, 0, 0, 0, 85, 289, 1, 0, 0, + 0, 87, 308, 1, 0, 0, 0, 89, 406, 1, 0, 0, 0, 91, 408, 1, 0, 0, 0, 93, 413, + 1, 0, 0, 0, 95, 96, 5, 61, 0, 0, 96, 97, 5, 61, 0, 0, 97, 2, 1, 0, 0, 0, + 98, 99, 5, 33, 0, 0, 99, 100, 5, 61, 0, 0, 100, 4, 1, 0, 0, 0, 101, 102, + 5, 105, 0, 0, 102, 103, 5, 110, 0, 0, 103, 6, 1, 0, 0, 0, 104, 105, 5, + 60, 0, 0, 105, 8, 1, 0, 0, 0, 106, 107, 5, 60, 0, 0, 107, 108, 5, 61, 0, + 0, 108, 10, 1, 0, 0, 0, 109, 110, 5, 62, 0, 0, 110, 111, 5, 61, 0, 0, 111, + 12, 1, 0, 0, 0, 112, 113, 5, 62, 0, 0, 113, 14, 1, 0, 0, 0, 114, 115, 5, + 38, 0, 0, 115, 116, 5, 38, 0, 0, 116, 16, 1, 0, 0, 0, 117, 118, 5, 124, + 0, 0, 118, 119, 5, 124, 0, 0, 119, 18, 1, 0, 0, 0, 120, 121, 5, 91, 0, + 0, 121, 20, 1, 0, 0, 0, 122, 123, 5, 93, 0, 0, 123, 22, 1, 0, 0, 0, 124, + 125, 5, 123, 0, 0, 125, 24, 1, 0, 0, 0, 126, 127, 5, 125, 0, 0, 127, 26, + 1, 0, 0, 0, 128, 129, 5, 40, 0, 0, 129, 28, 1, 0, 0, 0, 130, 131, 5, 41, + 0, 0, 131, 30, 1, 0, 0, 0, 132, 133, 5, 46, 0, 0, 133, 32, 1, 0, 0, 0, + 134, 135, 5, 44, 0, 0, 135, 34, 1, 0, 0, 0, 136, 137, 5, 45, 0, 0, 137, + 36, 1, 0, 0, 0, 138, 139, 5, 33, 0, 0, 139, 38, 1, 0, 0, 0, 140, 141, 5, + 63, 0, 0, 141, 40, 1, 0, 0, 0, 142, 143, 5, 58, 0, 0, 143, 42, 1, 0, 0, + 0, 144, 145, 5, 43, 0, 0, 145, 44, 1, 0, 0, 0, 146, 147, 5, 42, 0, 0, 147, + 46, 1, 0, 0, 0, 148, 149, 5, 47, 0, 0, 149, 48, 1, 0, 0, 0, 150, 151, 5, + 37, 0, 0, 151, 50, 1, 0, 0, 0, 152, 153, 5, 116, 0, 0, 153, 154, 5, 114, + 0, 0, 154, 155, 5, 117, 0, 0, 155, 156, 5, 101, 0, 0, 156, 52, 1, 0, 0, + 0, 157, 158, 5, 102, 0, 0, 158, 159, 5, 97, 0, 0, 159, 160, 5, 108, 0, + 0, 160, 161, 5, 115, 0, 0, 161, 162, 5, 101, 0, 0, 162, 54, 1, 0, 0, 0, + 163, 164, 5, 110, 0, 0, 164, 165, 5, 117, 0, 0, 165, 166, 5, 108, 0, 0, + 166, 167, 5, 108, 0, 0, 167, 56, 1, 0, 0, 0, 168, 169, 5, 92, 0, 0, 169, + 58, 1, 0, 0, 0, 170, 171, 7, 0, 0, 0, 171, 60, 1, 0, 0, 0, 172, 173, 2, + 48, 57, 0, 173, 62, 1, 0, 0, 0, 
174, 176, 7, 1, 0, 0, 175, 177, 7, 2, 0, + 0, 176, 175, 1, 0, 0, 0, 176, 177, 1, 0, 0, 0, 177, 179, 1, 0, 0, 0, 178, + 180, 3, 61, 30, 0, 179, 178, 1, 0, 0, 0, 180, 181, 1, 0, 0, 0, 181, 179, + 1, 0, 0, 0, 181, 182, 1, 0, 0, 0, 182, 64, 1, 0, 0, 0, 183, 184, 7, 3, + 0, 0, 184, 66, 1, 0, 0, 0, 185, 186, 7, 4, 0, 0, 186, 68, 1, 0, 0, 0, 187, + 192, 3, 71, 35, 0, 188, 192, 3, 75, 37, 0, 189, 192, 3, 77, 38, 0, 190, + 192, 3, 73, 36, 0, 191, 187, 1, 0, 0, 0, 191, 188, 1, 0, 0, 0, 191, 189, + 1, 0, 0, 0, 191, 190, 1, 0, 0, 0, 192, 70, 1, 0, 0, 0, 193, 194, 3, 57, + 28, 0, 194, 195, 7, 5, 0, 0, 195, 72, 1, 0, 0, 0, 196, 197, 3, 57, 28, + 0, 197, 198, 2, 48, 51, 0, 198, 199, 2, 48, 55, 0, 199, 200, 2, 48, 55, + 0, 200, 74, 1, 0, 0, 0, 201, 202, 3, 57, 28, 0, 202, 203, 7, 6, 0, 0, 203, + 204, 3, 65, 32, 0, 204, 205, 3, 65, 32, 0, 205, 76, 1, 0, 0, 0, 206, 207, + 3, 57, 28, 0, 207, 208, 5, 117, 0, 0, 208, 209, 3, 65, 32, 0, 209, 210, + 3, 65, 32, 0, 210, 211, 3, 65, 32, 0, 211, 212, 3, 65, 32, 0, 212, 225, + 1, 0, 0, 0, 213, 214, 3, 57, 28, 0, 214, 215, 5, 85, 0, 0, 215, 216, 3, + 65, 32, 0, 216, 217, 3, 65, 32, 0, 217, 218, 3, 65, 32, 0, 218, 219, 3, + 65, 32, 0, 219, 220, 3, 65, 32, 0, 220, 221, 3, 65, 32, 0, 221, 222, 3, + 65, 32, 0, 222, 223, 3, 65, 32, 0, 223, 225, 1, 0, 0, 0, 224, 206, 1, 0, + 0, 0, 224, 213, 1, 0, 0, 0, 225, 78, 1, 0, 0, 0, 226, 228, 7, 7, 0, 0, + 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, + 230, 1, 0, 0, 0, 230, 231, 1, 0, 0, 0, 231, 232, 6, 39, 0, 0, 232, 80, + 1, 0, 0, 0, 233, 234, 5, 47, 0, 0, 234, 235, 5, 47, 0, 0, 235, 239, 1, + 0, 0, 0, 236, 238, 8, 8, 0, 0, 237, 236, 1, 0, 0, 0, 238, 241, 1, 0, 0, + 0, 239, 237, 1, 0, 0, 0, 239, 240, 1, 0, 0, 0, 240, 242, 1, 0, 0, 0, 241, + 239, 1, 0, 0, 0, 242, 243, 6, 40, 0, 0, 243, 82, 1, 0, 0, 0, 244, 246, + 3, 61, 30, 0, 245, 244, 1, 0, 0, 0, 246, 247, 1, 0, 0, 0, 247, 245, 1, + 0, 0, 0, 247, 248, 1, 0, 0, 0, 248, 249, 1, 0, 0, 0, 249, 251, 5, 46, 0, + 0, 250, 252, 3, 61, 30, 0, 251, 250, 1, 0, 0, 0, 252, 253, 1, 0, 0, 0, + 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254, 256, 1, 0, 0, 0, 255, + 257, 3, 63, 31, 0, 256, 255, 1, 0, 0, 0, 256, 257, 1, 0, 0, 0, 257, 275, + 1, 0, 0, 0, 258, 260, 3, 61, 30, 0, 259, 258, 1, 0, 0, 0, 260, 261, 1, + 0, 0, 0, 261, 259, 1, 0, 0, 0, 261, 262, 1, 0, 0, 0, 262, 263, 1, 0, 0, + 0, 263, 264, 3, 63, 31, 0, 264, 275, 1, 0, 0, 0, 265, 267, 5, 46, 0, 0, + 266, 268, 3, 61, 30, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0, 0, 0, 269, + 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 272, 1, 0, 0, 0, 271, 273, + 3, 63, 31, 0, 272, 271, 1, 0, 0, 0, 272, 273, 1, 0, 0, 0, 273, 275, 1, + 0, 0, 0, 274, 245, 1, 0, 0, 0, 274, 259, 1, 0, 0, 0, 274, 265, 1, 0, 0, + 0, 275, 84, 1, 0, 0, 0, 276, 278, 3, 61, 30, 0, 277, 276, 1, 0, 0, 0, 278, + 279, 1, 0, 0, 0, 279, 277, 1, 0, 0, 0, 279, 280, 1, 0, 0, 0, 280, 290, + 1, 0, 0, 0, 281, 282, 5, 48, 0, 0, 282, 283, 5, 120, 0, 0, 283, 285, 1, + 0, 0, 0, 284, 286, 3, 65, 32, 0, 285, 284, 1, 0, 0, 0, 286, 287, 1, 0, + 0, 0, 287, 285, 1, 0, 0, 0, 287, 288, 1, 0, 0, 0, 288, 290, 1, 0, 0, 0, + 289, 277, 1, 0, 0, 0, 289, 281, 1, 0, 0, 0, 290, 86, 1, 0, 0, 0, 291, 293, + 3, 61, 30, 0, 292, 291, 1, 0, 0, 0, 293, 294, 1, 0, 0, 0, 294, 292, 1, + 0, 0, 0, 294, 295, 1, 0, 0, 0, 295, 296, 1, 0, 0, 0, 296, 297, 7, 9, 0, + 0, 297, 309, 1, 0, 0, 0, 298, 299, 5, 48, 0, 0, 299, 300, 5, 120, 0, 0, + 300, 302, 1, 0, 0, 0, 301, 303, 3, 65, 32, 0, 302, 301, 1, 0, 0, 0, 303, + 304, 1, 0, 0, 0, 304, 302, 1, 0, 0, 0, 304, 305, 1, 0, 0, 0, 305, 306, + 1, 0, 0, 0, 306, 307, 7, 
9, 0, 0, 307, 309, 1, 0, 0, 0, 308, 292, 1, 0, + 0, 0, 308, 298, 1, 0, 0, 0, 309, 88, 1, 0, 0, 0, 310, 315, 5, 34, 0, 0, + 311, 314, 3, 69, 34, 0, 312, 314, 8, 10, 0, 0, 313, 311, 1, 0, 0, 0, 313, + 312, 1, 0, 0, 0, 314, 317, 1, 0, 0, 0, 315, 313, 1, 0, 0, 0, 315, 316, + 1, 0, 0, 0, 316, 318, 1, 0, 0, 0, 317, 315, 1, 0, 0, 0, 318, 407, 5, 34, + 0, 0, 319, 324, 5, 39, 0, 0, 320, 323, 3, 69, 34, 0, 321, 323, 8, 11, 0, + 0, 322, 320, 1, 0, 0, 0, 322, 321, 1, 0, 0, 0, 323, 326, 1, 0, 0, 0, 324, + 322, 1, 0, 0, 0, 324, 325, 1, 0, 0, 0, 325, 327, 1, 0, 0, 0, 326, 324, + 1, 0, 0, 0, 327, 407, 5, 39, 0, 0, 328, 329, 5, 34, 0, 0, 329, 330, 5, + 34, 0, 0, 330, 331, 5, 34, 0, 0, 331, 336, 1, 0, 0, 0, 332, 335, 3, 69, + 34, 0, 333, 335, 8, 12, 0, 0, 334, 332, 1, 0, 0, 0, 334, 333, 1, 0, 0, + 0, 335, 338, 1, 0, 0, 0, 336, 337, 1, 0, 0, 0, 336, 334, 1, 0, 0, 0, 337, + 339, 1, 0, 0, 0, 338, 336, 1, 0, 0, 0, 339, 340, 5, 34, 0, 0, 340, 341, + 5, 34, 0, 0, 341, 407, 5, 34, 0, 0, 342, 343, 5, 39, 0, 0, 343, 344, 5, + 39, 0, 0, 344, 345, 5, 39, 0, 0, 345, 350, 1, 0, 0, 0, 346, 349, 3, 69, + 34, 0, 347, 349, 8, 12, 0, 0, 348, 346, 1, 0, 0, 0, 348, 347, 1, 0, 0, + 0, 349, 352, 1, 0, 0, 0, 350, 351, 1, 0, 0, 0, 350, 348, 1, 0, 0, 0, 351, + 353, 1, 0, 0, 0, 352, 350, 1, 0, 0, 0, 353, 354, 5, 39, 0, 0, 354, 355, + 5, 39, 0, 0, 355, 407, 5, 39, 0, 0, 356, 357, 3, 67, 33, 0, 357, 361, 5, + 34, 0, 0, 358, 360, 8, 13, 0, 0, 359, 358, 1, 0, 0, 0, 360, 363, 1, 0, + 0, 0, 361, 359, 1, 0, 0, 0, 361, 362, 1, 0, 0, 0, 362, 364, 1, 0, 0, 0, + 363, 361, 1, 0, 0, 0, 364, 365, 5, 34, 0, 0, 365, 407, 1, 0, 0, 0, 366, + 367, 3, 67, 33, 0, 367, 371, 5, 39, 0, 0, 368, 370, 8, 14, 0, 0, 369, 368, + 1, 0, 0, 0, 370, 373, 1, 0, 0, 0, 371, 369, 1, 0, 0, 0, 371, 372, 1, 0, + 0, 0, 372, 374, 1, 0, 0, 0, 373, 371, 1, 0, 0, 0, 374, 375, 5, 39, 0, 0, + 375, 407, 1, 0, 0, 0, 376, 377, 3, 67, 33, 0, 377, 378, 5, 34, 0, 0, 378, + 379, 5, 34, 0, 0, 379, 380, 5, 34, 0, 0, 380, 384, 1, 0, 0, 0, 381, 383, + 9, 0, 0, 0, 382, 381, 1, 0, 0, 0, 383, 386, 1, 0, 0, 0, 384, 385, 1, 0, + 0, 0, 384, 382, 1, 0, 0, 0, 385, 387, 1, 0, 0, 0, 386, 384, 1, 0, 0, 0, + 387, 388, 5, 34, 0, 0, 388, 389, 5, 34, 0, 0, 389, 390, 5, 34, 0, 0, 390, + 407, 1, 0, 0, 0, 391, 392, 3, 67, 33, 0, 392, 393, 5, 39, 0, 0, 393, 394, + 5, 39, 0, 0, 394, 395, 5, 39, 0, 0, 395, 399, 1, 0, 0, 0, 396, 398, 9, + 0, 0, 0, 397, 396, 1, 0, 0, 0, 398, 401, 1, 0, 0, 0, 399, 400, 1, 0, 0, + 0, 399, 397, 1, 0, 0, 0, 400, 402, 1, 0, 0, 0, 401, 399, 1, 0, 0, 0, 402, + 403, 5, 39, 0, 0, 403, 404, 5, 39, 0, 0, 404, 405, 5, 39, 0, 0, 405, 407, + 1, 0, 0, 0, 406, 310, 1, 0, 0, 0, 406, 319, 1, 0, 0, 0, 406, 328, 1, 0, + 0, 0, 406, 342, 1, 0, 0, 0, 406, 356, 1, 0, 0, 0, 406, 366, 1, 0, 0, 0, + 406, 376, 1, 0, 0, 0, 406, 391, 1, 0, 0, 0, 407, 90, 1, 0, 0, 0, 408, 409, + 7, 15, 0, 0, 409, 410, 3, 89, 44, 0, 410, 92, 1, 0, 0, 0, 411, 414, 3, + 59, 29, 0, 412, 414, 5, 95, 0, 0, 413, 411, 1, 0, 0, 0, 413, 412, 1, 0, + 0, 0, 414, 420, 1, 0, 0, 0, 415, 419, 3, 59, 29, 0, 416, 419, 3, 61, 30, + 0, 417, 419, 5, 95, 0, 0, 418, 415, 1, 0, 0, 0, 418, 416, 1, 0, 0, 0, 418, + 417, 1, 0, 0, 0, 419, 422, 1, 0, 0, 0, 420, 418, 1, 0, 0, 0, 420, 421, + 1, 0, 0, 0, 421, 94, 1, 0, 0, 0, 422, 420, 1, 0, 0, 0, 36, 0, 176, 181, + 191, 224, 229, 239, 247, 253, 256, 261, 269, 272, 274, 279, 287, 289, 294, + 304, 308, 313, 315, 322, 324, 334, 336, 348, 350, 361, 371, 384, 399, 406, + 413, 418, 420, 1, 0, 1, 0, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = 
deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } +} + +// CELLexerInit initializes any static state used to implement CELLexer. By default the +// static state used to implement the lexer is lazily initialized during the first call to +// NewCELLexer(). You can call this function if you wish to initialize the static state ahead +// of time. +func CELLexerInit() { + staticData := &CELLexerLexerStaticData + staticData.once.Do(cellexerLexerInit) +} + +// NewCELLexer produces a new lexer instance for the optional input antlr.CharStream. +func NewCELLexer(input antlr.CharStream) *CELLexer { + CELLexerInit() + l := new(CELLexer) + l.BaseLexer = antlr.NewBaseLexer(input) + staticData := &CELLexerLexerStaticData + l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + l.channelNames = staticData.ChannelNames + l.modeNames = staticData.ModeNames + l.RuleNames = staticData.RuleNames + l.LiteralNames = staticData.LiteralNames + l.SymbolicNames = staticData.SymbolicNames + l.GrammarFileName = "CEL.g4" + // TODO: l.EOF = antlr.TokenEOF + + return l +} + +// CELLexer tokens. +const ( + CELLexerEQUALS = 1 + CELLexerNOT_EQUALS = 2 + CELLexerIN = 3 + CELLexerLESS = 4 + CELLexerLESS_EQUALS = 5 + CELLexerGREATER_EQUALS = 6 + CELLexerGREATER = 7 + CELLexerLOGICAL_AND = 8 + CELLexerLOGICAL_OR = 9 + CELLexerLBRACKET = 10 + CELLexerRPRACKET = 11 + CELLexerLBRACE = 12 + CELLexerRBRACE = 13 + CELLexerLPAREN = 14 + CELLexerRPAREN = 15 + CELLexerDOT = 16 + CELLexerCOMMA = 17 + CELLexerMINUS = 18 + CELLexerEXCLAM = 19 + CELLexerQUESTIONMARK = 20 + CELLexerCOLON = 21 + CELLexerPLUS = 22 + CELLexerSTAR = 23 + CELLexerSLASH = 24 + CELLexerPERCENT = 25 + CELLexerCEL_TRUE = 26 + CELLexerCEL_FALSE = 27 + CELLexerNUL = 28 + CELLexerWHITESPACE = 29 + CELLexerCOMMENT = 30 + CELLexerNUM_FLOAT = 31 + CELLexerNUM_INT = 32 + CELLexerNUM_UINT = 33 + CELLexerSTRING = 34 + CELLexerBYTES = 35 + CELLexerIDENTIFIER = 36 +) + diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_listener.go b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go new file mode 100644 index 00000000..22dc9978 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/cel_listener.go @@ -0,0 +1,208 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. + +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + + +// CELListener is a complete listener for a parse tree produced by CELParser. +type CELListener interface { + antlr.ParseTreeListener + + // EnterStart is called when entering the start production. + EnterStart(c *StartContext) + + // EnterExpr is called when entering the expr production. + EnterExpr(c *ExprContext) + + // EnterConditionalOr is called when entering the conditionalOr production. + EnterConditionalOr(c *ConditionalOrContext) + + // EnterConditionalAnd is called when entering the conditionalAnd production. + EnterConditionalAnd(c *ConditionalAndContext) + + // EnterRelation is called when entering the relation production. + EnterRelation(c *RelationContext) + + // EnterCalc is called when entering the calc production. + EnterCalc(c *CalcContext) + + // EnterMemberExpr is called when entering the MemberExpr production. 
+ EnterMemberExpr(c *MemberExprContext) + + // EnterLogicalNot is called when entering the LogicalNot production. + EnterLogicalNot(c *LogicalNotContext) + + // EnterNegate is called when entering the Negate production. + EnterNegate(c *NegateContext) + + // EnterMemberCall is called when entering the MemberCall production. + EnterMemberCall(c *MemberCallContext) + + // EnterSelect is called when entering the Select production. + EnterSelect(c *SelectContext) + + // EnterPrimaryExpr is called when entering the PrimaryExpr production. + EnterPrimaryExpr(c *PrimaryExprContext) + + // EnterIndex is called when entering the Index production. + EnterIndex(c *IndexContext) + + // EnterIdentOrGlobalCall is called when entering the IdentOrGlobalCall production. + EnterIdentOrGlobalCall(c *IdentOrGlobalCallContext) + + // EnterNested is called when entering the Nested production. + EnterNested(c *NestedContext) + + // EnterCreateList is called when entering the CreateList production. + EnterCreateList(c *CreateListContext) + + // EnterCreateStruct is called when entering the CreateStruct production. + EnterCreateStruct(c *CreateStructContext) + + // EnterCreateMessage is called when entering the CreateMessage production. + EnterCreateMessage(c *CreateMessageContext) + + // EnterConstantLiteral is called when entering the ConstantLiteral production. + EnterConstantLiteral(c *ConstantLiteralContext) + + // EnterExprList is called when entering the exprList production. + EnterExprList(c *ExprListContext) + + // EnterListInit is called when entering the listInit production. + EnterListInit(c *ListInitContext) + + // EnterFieldInitializerList is called when entering the fieldInitializerList production. + EnterFieldInitializerList(c *FieldInitializerListContext) + + // EnterOptField is called when entering the optField production. + EnterOptField(c *OptFieldContext) + + // EnterMapInitializerList is called when entering the mapInitializerList production. + EnterMapInitializerList(c *MapInitializerListContext) + + // EnterOptExpr is called when entering the optExpr production. + EnterOptExpr(c *OptExprContext) + + // EnterInt is called when entering the Int production. + EnterInt(c *IntContext) + + // EnterUint is called when entering the Uint production. + EnterUint(c *UintContext) + + // EnterDouble is called when entering the Double production. + EnterDouble(c *DoubleContext) + + // EnterString is called when entering the String production. + EnterString(c *StringContext) + + // EnterBytes is called when entering the Bytes production. + EnterBytes(c *BytesContext) + + // EnterBoolTrue is called when entering the BoolTrue production. + EnterBoolTrue(c *BoolTrueContext) + + // EnterBoolFalse is called when entering the BoolFalse production. + EnterBoolFalse(c *BoolFalseContext) + + // EnterNull is called when entering the Null production. + EnterNull(c *NullContext) + + // ExitStart is called when exiting the start production. + ExitStart(c *StartContext) + + // ExitExpr is called when exiting the expr production. + ExitExpr(c *ExprContext) + + // ExitConditionalOr is called when exiting the conditionalOr production. + ExitConditionalOr(c *ConditionalOrContext) + + // ExitConditionalAnd is called when exiting the conditionalAnd production. + ExitConditionalAnd(c *ConditionalAndContext) + + // ExitRelation is called when exiting the relation production. + ExitRelation(c *RelationContext) + + // ExitCalc is called when exiting the calc production. 
+ ExitCalc(c *CalcContext) + + // ExitMemberExpr is called when exiting the MemberExpr production. + ExitMemberExpr(c *MemberExprContext) + + // ExitLogicalNot is called when exiting the LogicalNot production. + ExitLogicalNot(c *LogicalNotContext) + + // ExitNegate is called when exiting the Negate production. + ExitNegate(c *NegateContext) + + // ExitMemberCall is called when exiting the MemberCall production. + ExitMemberCall(c *MemberCallContext) + + // ExitSelect is called when exiting the Select production. + ExitSelect(c *SelectContext) + + // ExitPrimaryExpr is called when exiting the PrimaryExpr production. + ExitPrimaryExpr(c *PrimaryExprContext) + + // ExitIndex is called when exiting the Index production. + ExitIndex(c *IndexContext) + + // ExitIdentOrGlobalCall is called when exiting the IdentOrGlobalCall production. + ExitIdentOrGlobalCall(c *IdentOrGlobalCallContext) + + // ExitNested is called when exiting the Nested production. + ExitNested(c *NestedContext) + + // ExitCreateList is called when exiting the CreateList production. + ExitCreateList(c *CreateListContext) + + // ExitCreateStruct is called when exiting the CreateStruct production. + ExitCreateStruct(c *CreateStructContext) + + // ExitCreateMessage is called when exiting the CreateMessage production. + ExitCreateMessage(c *CreateMessageContext) + + // ExitConstantLiteral is called when exiting the ConstantLiteral production. + ExitConstantLiteral(c *ConstantLiteralContext) + + // ExitExprList is called when exiting the exprList production. + ExitExprList(c *ExprListContext) + + // ExitListInit is called when exiting the listInit production. + ExitListInit(c *ListInitContext) + + // ExitFieldInitializerList is called when exiting the fieldInitializerList production. + ExitFieldInitializerList(c *FieldInitializerListContext) + + // ExitOptField is called when exiting the optField production. + ExitOptField(c *OptFieldContext) + + // ExitMapInitializerList is called when exiting the mapInitializerList production. + ExitMapInitializerList(c *MapInitializerListContext) + + // ExitOptExpr is called when exiting the optExpr production. + ExitOptExpr(c *OptExprContext) + + // ExitInt is called when exiting the Int production. + ExitInt(c *IntContext) + + // ExitUint is called when exiting the Uint production. + ExitUint(c *UintContext) + + // ExitDouble is called when exiting the Double production. + ExitDouble(c *DoubleContext) + + // ExitString is called when exiting the String production. + ExitString(c *StringContext) + + // ExitBytes is called when exiting the Bytes production. + ExitBytes(c *BytesContext) + + // ExitBoolTrue is called when exiting the BoolTrue production. + ExitBoolTrue(c *BoolTrueContext) + + // ExitBoolFalse is called when exiting the BoolFalse production. + ExitBoolFalse(c *BoolFalseContext) + + // ExitNull is called when exiting the Null production. + ExitNull(c *NullContext) +} diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_parser.go b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go new file mode 100644 index 00000000..35334af6 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/cel_parser.go @@ -0,0 +1,6274 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
+ +package gen // CEL +import ( + "fmt" + "strconv" + "sync" + + "github.com/antlr4-go/antlr/v4" +) + +// Suppress unused import errors +var _ = fmt.Printf +var _ = strconv.Itoa +var _ = sync.Once{} + + +type CELParser struct { + *antlr.BaseParser +} + +var CELParserStaticData struct { + once sync.Once + serializedATN []int32 + LiteralNames []string + SymbolicNames []string + RuleNames []string + PredictionContextCache *antlr.PredictionContextCache + atn *antlr.ATN + decisionToDFA []*antlr.DFA +} + +func celParserInit() { + staticData := &CELParserStaticData + staticData.LiteralNames = []string{ + "", "'=='", "'!='", "'in'", "'<'", "'<='", "'>='", "'>'", "'&&'", "'||'", + "'['", "']'", "'{'", "'}'", "'('", "')'", "'.'", "','", "'-'", "'!'", + "'?'", "':'", "'+'", "'*'", "'/'", "'%'", "'true'", "'false'", "'null'", + } + staticData.SymbolicNames = []string{ + "", "EQUALS", "NOT_EQUALS", "IN", "LESS", "LESS_EQUALS", "GREATER_EQUALS", + "GREATER", "LOGICAL_AND", "LOGICAL_OR", "LBRACKET", "RPRACKET", "LBRACE", + "RBRACE", "LPAREN", "RPAREN", "DOT", "COMMA", "MINUS", "EXCLAM", "QUESTIONMARK", + "COLON", "PLUS", "STAR", "SLASH", "PERCENT", "CEL_TRUE", "CEL_FALSE", + "NUL", "WHITESPACE", "COMMENT", "NUM_FLOAT", "NUM_INT", "NUM_UINT", + "STRING", "BYTES", "IDENTIFIER", + } + staticData.RuleNames = []string{ + "start", "expr", "conditionalOr", "conditionalAnd", "relation", "calc", + "unary", "member", "primary", "exprList", "listInit", "fieldInitializerList", + "optField", "mapInitializerList", "optExpr", "literal", + } + staticData.PredictionContextCache = antlr.NewPredictionContextCache() + staticData.serializedATN = []int32{ + 4, 1, 36, 251, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, + 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, + 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, + 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 42, 8, 1, 1, + 2, 1, 2, 1, 2, 5, 2, 47, 8, 2, 10, 2, 12, 2, 50, 9, 2, 1, 3, 1, 3, 1, 3, + 5, 3, 55, 8, 3, 10, 3, 12, 3, 58, 9, 3, 1, 4, 1, 4, 1, 4, 1, 4, 1, 4, 1, + 4, 5, 4, 66, 8, 4, 10, 4, 12, 4, 69, 9, 4, 1, 5, 1, 5, 1, 5, 1, 5, 1, 5, + 1, 5, 1, 5, 1, 5, 1, 5, 5, 5, 80, 8, 5, 10, 5, 12, 5, 83, 9, 5, 1, 6, 1, + 6, 4, 6, 87, 8, 6, 11, 6, 12, 6, 88, 1, 6, 1, 6, 4, 6, 93, 8, 6, 11, 6, + 12, 6, 94, 1, 6, 3, 6, 98, 8, 6, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, + 7, 106, 8, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 1, 7, 3, 7, 114, 8, 7, 1, 7, + 1, 7, 1, 7, 1, 7, 3, 7, 120, 8, 7, 1, 7, 1, 7, 1, 7, 5, 7, 125, 8, 7, 10, + 7, 12, 7, 128, 9, 7, 1, 8, 3, 8, 131, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 136, + 8, 8, 1, 8, 3, 8, 139, 8, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 1, 8, 3, 8, + 147, 8, 8, 1, 8, 3, 8, 150, 8, 8, 1, 8, 1, 8, 1, 8, 3, 8, 155, 8, 8, 1, + 8, 3, 8, 158, 8, 8, 1, 8, 1, 8, 3, 8, 162, 8, 8, 1, 8, 1, 8, 1, 8, 5, 8, + 167, 8, 8, 10, 8, 12, 8, 170, 9, 8, 1, 8, 1, 8, 3, 8, 174, 8, 8, 1, 8, + 3, 8, 177, 8, 8, 1, 8, 1, 8, 3, 8, 181, 8, 8, 1, 9, 1, 9, 1, 9, 5, 9, 186, + 8, 9, 10, 9, 12, 9, 189, 9, 9, 1, 10, 1, 10, 1, 10, 5, 10, 194, 8, 10, + 10, 10, 12, 10, 197, 9, 10, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, 11, 1, + 11, 1, 11, 5, 11, 207, 8, 11, 10, 11, 12, 11, 210, 9, 11, 1, 12, 3, 12, + 213, 8, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, 13, 1, + 13, 1, 13, 5, 13, 225, 8, 13, 10, 13, 12, 13, 228, 9, 13, 1, 14, 3, 14, + 231, 8, 14, 1, 14, 1, 14, 1, 15, 3, 15, 236, 8, 15, 1, 15, 1, 15, 1, 15, + 3, 15, 241, 8, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 15, 3, 15, 249, + 8, 15, 1, 15, 0, 3, 8, 10, 14, 16, 0, 2, 
4, 6, 8, 10, 12, 14, 16, 18, 20, + 22, 24, 26, 28, 30, 0, 3, 1, 0, 1, 7, 1, 0, 23, 25, 2, 0, 18, 18, 22, 22, + 281, 0, 32, 1, 0, 0, 0, 2, 35, 1, 0, 0, 0, 4, 43, 1, 0, 0, 0, 6, 51, 1, + 0, 0, 0, 8, 59, 1, 0, 0, 0, 10, 70, 1, 0, 0, 0, 12, 97, 1, 0, 0, 0, 14, + 99, 1, 0, 0, 0, 16, 180, 1, 0, 0, 0, 18, 182, 1, 0, 0, 0, 20, 190, 1, 0, + 0, 0, 22, 198, 1, 0, 0, 0, 24, 212, 1, 0, 0, 0, 26, 216, 1, 0, 0, 0, 28, + 230, 1, 0, 0, 0, 30, 248, 1, 0, 0, 0, 32, 33, 3, 2, 1, 0, 33, 34, 5, 0, + 0, 1, 34, 1, 1, 0, 0, 0, 35, 41, 3, 4, 2, 0, 36, 37, 5, 20, 0, 0, 37, 38, + 3, 4, 2, 0, 38, 39, 5, 21, 0, 0, 39, 40, 3, 2, 1, 0, 40, 42, 1, 0, 0, 0, + 41, 36, 1, 0, 0, 0, 41, 42, 1, 0, 0, 0, 42, 3, 1, 0, 0, 0, 43, 48, 3, 6, + 3, 0, 44, 45, 5, 9, 0, 0, 45, 47, 3, 6, 3, 0, 46, 44, 1, 0, 0, 0, 47, 50, + 1, 0, 0, 0, 48, 46, 1, 0, 0, 0, 48, 49, 1, 0, 0, 0, 49, 5, 1, 0, 0, 0, + 50, 48, 1, 0, 0, 0, 51, 56, 3, 8, 4, 0, 52, 53, 5, 8, 0, 0, 53, 55, 3, + 8, 4, 0, 54, 52, 1, 0, 0, 0, 55, 58, 1, 0, 0, 0, 56, 54, 1, 0, 0, 0, 56, + 57, 1, 0, 0, 0, 57, 7, 1, 0, 0, 0, 58, 56, 1, 0, 0, 0, 59, 60, 6, 4, -1, + 0, 60, 61, 3, 10, 5, 0, 61, 67, 1, 0, 0, 0, 62, 63, 10, 1, 0, 0, 63, 64, + 7, 0, 0, 0, 64, 66, 3, 8, 4, 2, 65, 62, 1, 0, 0, 0, 66, 69, 1, 0, 0, 0, + 67, 65, 1, 0, 0, 0, 67, 68, 1, 0, 0, 0, 68, 9, 1, 0, 0, 0, 69, 67, 1, 0, + 0, 0, 70, 71, 6, 5, -1, 0, 71, 72, 3, 12, 6, 0, 72, 81, 1, 0, 0, 0, 73, + 74, 10, 2, 0, 0, 74, 75, 7, 1, 0, 0, 75, 80, 3, 10, 5, 3, 76, 77, 10, 1, + 0, 0, 77, 78, 7, 2, 0, 0, 78, 80, 3, 10, 5, 2, 79, 73, 1, 0, 0, 0, 79, + 76, 1, 0, 0, 0, 80, 83, 1, 0, 0, 0, 81, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, + 0, 82, 11, 1, 0, 0, 0, 83, 81, 1, 0, 0, 0, 84, 98, 3, 14, 7, 0, 85, 87, + 5, 19, 0, 0, 86, 85, 1, 0, 0, 0, 87, 88, 1, 0, 0, 0, 88, 86, 1, 0, 0, 0, + 88, 89, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 98, 3, 14, 7, 0, 91, 93, 5, + 18, 0, 0, 92, 91, 1, 0, 0, 0, 93, 94, 1, 0, 0, 0, 94, 92, 1, 0, 0, 0, 94, + 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 98, 3, 14, 7, 0, 97, 84, 1, 0, + 0, 0, 97, 86, 1, 0, 0, 0, 97, 92, 1, 0, 0, 0, 98, 13, 1, 0, 0, 0, 99, 100, + 6, 7, -1, 0, 100, 101, 3, 16, 8, 0, 101, 126, 1, 0, 0, 0, 102, 103, 10, + 3, 0, 0, 103, 105, 5, 16, 0, 0, 104, 106, 5, 20, 0, 0, 105, 104, 1, 0, + 0, 0, 105, 106, 1, 0, 0, 0, 106, 107, 1, 0, 0, 0, 107, 125, 5, 36, 0, 0, + 108, 109, 10, 2, 0, 0, 109, 110, 5, 16, 0, 0, 110, 111, 5, 36, 0, 0, 111, + 113, 5, 14, 0, 0, 112, 114, 3, 18, 9, 0, 113, 112, 1, 0, 0, 0, 113, 114, + 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115, 125, 5, 15, 0, 0, 116, 117, 10, + 1, 0, 0, 117, 119, 5, 10, 0, 0, 118, 120, 5, 20, 0, 0, 119, 118, 1, 0, + 0, 0, 119, 120, 1, 0, 0, 0, 120, 121, 1, 0, 0, 0, 121, 122, 3, 2, 1, 0, + 122, 123, 5, 11, 0, 0, 123, 125, 1, 0, 0, 0, 124, 102, 1, 0, 0, 0, 124, + 108, 1, 0, 0, 0, 124, 116, 1, 0, 0, 0, 125, 128, 1, 0, 0, 0, 126, 124, + 1, 0, 0, 0, 126, 127, 1, 0, 0, 0, 127, 15, 1, 0, 0, 0, 128, 126, 1, 0, + 0, 0, 129, 131, 5, 16, 0, 0, 130, 129, 1, 0, 0, 0, 130, 131, 1, 0, 0, 0, + 131, 132, 1, 0, 0, 0, 132, 138, 5, 36, 0, 0, 133, 135, 5, 14, 0, 0, 134, + 136, 3, 18, 9, 0, 135, 134, 1, 0, 0, 0, 135, 136, 1, 0, 0, 0, 136, 137, + 1, 0, 0, 0, 137, 139, 5, 15, 0, 0, 138, 133, 1, 0, 0, 0, 138, 139, 1, 0, + 0, 0, 139, 181, 1, 0, 0, 0, 140, 141, 5, 14, 0, 0, 141, 142, 3, 2, 1, 0, + 142, 143, 5, 15, 0, 0, 143, 181, 1, 0, 0, 0, 144, 146, 5, 10, 0, 0, 145, + 147, 3, 20, 10, 0, 146, 145, 1, 0, 0, 0, 146, 147, 1, 0, 0, 0, 147, 149, + 1, 0, 0, 0, 148, 150, 5, 17, 0, 0, 149, 148, 1, 0, 0, 0, 149, 150, 1, 0, + 0, 0, 150, 151, 1, 0, 0, 0, 151, 181, 5, 11, 0, 0, 152, 154, 5, 12, 0, + 0, 153, 155, 3, 
26, 13, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, + 155, 157, 1, 0, 0, 0, 156, 158, 5, 17, 0, 0, 157, 156, 1, 0, 0, 0, 157, + 158, 1, 0, 0, 0, 158, 159, 1, 0, 0, 0, 159, 181, 5, 13, 0, 0, 160, 162, + 5, 16, 0, 0, 161, 160, 1, 0, 0, 0, 161, 162, 1, 0, 0, 0, 162, 163, 1, 0, + 0, 0, 163, 168, 5, 36, 0, 0, 164, 165, 5, 16, 0, 0, 165, 167, 5, 36, 0, + 0, 166, 164, 1, 0, 0, 0, 167, 170, 1, 0, 0, 0, 168, 166, 1, 0, 0, 0, 168, + 169, 1, 0, 0, 0, 169, 171, 1, 0, 0, 0, 170, 168, 1, 0, 0, 0, 171, 173, + 5, 12, 0, 0, 172, 174, 3, 22, 11, 0, 173, 172, 1, 0, 0, 0, 173, 174, 1, + 0, 0, 0, 174, 176, 1, 0, 0, 0, 175, 177, 5, 17, 0, 0, 176, 175, 1, 0, 0, + 0, 176, 177, 1, 0, 0, 0, 177, 178, 1, 0, 0, 0, 178, 181, 5, 13, 0, 0, 179, + 181, 3, 30, 15, 0, 180, 130, 1, 0, 0, 0, 180, 140, 1, 0, 0, 0, 180, 144, + 1, 0, 0, 0, 180, 152, 1, 0, 0, 0, 180, 161, 1, 0, 0, 0, 180, 179, 1, 0, + 0, 0, 181, 17, 1, 0, 0, 0, 182, 187, 3, 2, 1, 0, 183, 184, 5, 17, 0, 0, + 184, 186, 3, 2, 1, 0, 185, 183, 1, 0, 0, 0, 186, 189, 1, 0, 0, 0, 187, + 185, 1, 0, 0, 0, 187, 188, 1, 0, 0, 0, 188, 19, 1, 0, 0, 0, 189, 187, 1, + 0, 0, 0, 190, 195, 3, 28, 14, 0, 191, 192, 5, 17, 0, 0, 192, 194, 3, 28, + 14, 0, 193, 191, 1, 0, 0, 0, 194, 197, 1, 0, 0, 0, 195, 193, 1, 0, 0, 0, + 195, 196, 1, 0, 0, 0, 196, 21, 1, 0, 0, 0, 197, 195, 1, 0, 0, 0, 198, 199, + 3, 24, 12, 0, 199, 200, 5, 21, 0, 0, 200, 208, 3, 2, 1, 0, 201, 202, 5, + 17, 0, 0, 202, 203, 3, 24, 12, 0, 203, 204, 5, 21, 0, 0, 204, 205, 3, 2, + 1, 0, 205, 207, 1, 0, 0, 0, 206, 201, 1, 0, 0, 0, 207, 210, 1, 0, 0, 0, + 208, 206, 1, 0, 0, 0, 208, 209, 1, 0, 0, 0, 209, 23, 1, 0, 0, 0, 210, 208, + 1, 0, 0, 0, 211, 213, 5, 20, 0, 0, 212, 211, 1, 0, 0, 0, 212, 213, 1, 0, + 0, 0, 213, 214, 1, 0, 0, 0, 214, 215, 5, 36, 0, 0, 215, 25, 1, 0, 0, 0, + 216, 217, 3, 28, 14, 0, 217, 218, 5, 21, 0, 0, 218, 226, 3, 2, 1, 0, 219, + 220, 5, 17, 0, 0, 220, 221, 3, 28, 14, 0, 221, 222, 5, 21, 0, 0, 222, 223, + 3, 2, 1, 0, 223, 225, 1, 0, 0, 0, 224, 219, 1, 0, 0, 0, 225, 228, 1, 0, + 0, 0, 226, 224, 1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 27, 1, 0, 0, 0, + 228, 226, 1, 0, 0, 0, 229, 231, 5, 20, 0, 0, 230, 229, 1, 0, 0, 0, 230, + 231, 1, 0, 0, 0, 231, 232, 1, 0, 0, 0, 232, 233, 3, 2, 1, 0, 233, 29, 1, + 0, 0, 0, 234, 236, 5, 18, 0, 0, 235, 234, 1, 0, 0, 0, 235, 236, 1, 0, 0, + 0, 236, 237, 1, 0, 0, 0, 237, 249, 5, 32, 0, 0, 238, 249, 5, 33, 0, 0, + 239, 241, 5, 18, 0, 0, 240, 239, 1, 0, 0, 0, 240, 241, 1, 0, 0, 0, 241, + 242, 1, 0, 0, 0, 242, 249, 5, 31, 0, 0, 243, 249, 5, 34, 0, 0, 244, 249, + 5, 35, 0, 0, 245, 249, 5, 26, 0, 0, 246, 249, 5, 27, 0, 0, 247, 249, 5, + 28, 0, 0, 248, 235, 1, 0, 0, 0, 248, 238, 1, 0, 0, 0, 248, 240, 1, 0, 0, + 0, 248, 243, 1, 0, 0, 0, 248, 244, 1, 0, 0, 0, 248, 245, 1, 0, 0, 0, 248, + 246, 1, 0, 0, 0, 248, 247, 1, 0, 0, 0, 249, 31, 1, 0, 0, 0, 35, 41, 48, + 56, 67, 79, 81, 88, 94, 97, 105, 113, 119, 124, 126, 130, 135, 138, 146, + 149, 154, 157, 161, 168, 173, 176, 180, 187, 195, 208, 212, 226, 230, 235, + 240, 248, +} + deserializer := antlr.NewATNDeserializer(nil) + staticData.atn = deserializer.Deserialize(staticData.serializedATN) + atn := staticData.atn + staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState)) + decisionToDFA := staticData.decisionToDFA + for index, state := range atn.DecisionToState { + decisionToDFA[index] = antlr.NewDFA(state, index) + } +} + +// CELParserInit initializes any static state used to implement CELParser. 
By default the +// static state used to implement the parser is lazily initialized during the first call to +// NewCELParser(). You can call this function if you wish to initialize the static state ahead +// of time. +func CELParserInit() { + staticData := &CELParserStaticData + staticData.once.Do(celParserInit) +} + +// NewCELParser produces a new parser instance for the optional input antlr.TokenStream. +func NewCELParser(input antlr.TokenStream) *CELParser { + CELParserInit() + this := new(CELParser) + this.BaseParser = antlr.NewBaseParser(input) + staticData := &CELParserStaticData + this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache) + this.RuleNames = staticData.RuleNames + this.LiteralNames = staticData.LiteralNames + this.SymbolicNames = staticData.SymbolicNames + this.GrammarFileName = "CEL.g4" + + return this +} + + +// CELParser tokens. +const ( + CELParserEOF = antlr.TokenEOF + CELParserEQUALS = 1 + CELParserNOT_EQUALS = 2 + CELParserIN = 3 + CELParserLESS = 4 + CELParserLESS_EQUALS = 5 + CELParserGREATER_EQUALS = 6 + CELParserGREATER = 7 + CELParserLOGICAL_AND = 8 + CELParserLOGICAL_OR = 9 + CELParserLBRACKET = 10 + CELParserRPRACKET = 11 + CELParserLBRACE = 12 + CELParserRBRACE = 13 + CELParserLPAREN = 14 + CELParserRPAREN = 15 + CELParserDOT = 16 + CELParserCOMMA = 17 + CELParserMINUS = 18 + CELParserEXCLAM = 19 + CELParserQUESTIONMARK = 20 + CELParserCOLON = 21 + CELParserPLUS = 22 + CELParserSTAR = 23 + CELParserSLASH = 24 + CELParserPERCENT = 25 + CELParserCEL_TRUE = 26 + CELParserCEL_FALSE = 27 + CELParserNUL = 28 + CELParserWHITESPACE = 29 + CELParserCOMMENT = 30 + CELParserNUM_FLOAT = 31 + CELParserNUM_INT = 32 + CELParserNUM_UINT = 33 + CELParserSTRING = 34 + CELParserBYTES = 35 + CELParserIDENTIFIER = 36 +) + +// CELParser rules. +const ( + CELParserRULE_start = 0 + CELParserRULE_expr = 1 + CELParserRULE_conditionalOr = 2 + CELParserRULE_conditionalAnd = 3 + CELParserRULE_relation = 4 + CELParserRULE_calc = 5 + CELParserRULE_unary = 6 + CELParserRULE_member = 7 + CELParserRULE_primary = 8 + CELParserRULE_exprList = 9 + CELParserRULE_listInit = 10 + CELParserRULE_fieldInitializerList = 11 + CELParserRULE_optField = 12 + CELParserRULE_mapInitializerList = 13 + CELParserRULE_optExpr = 14 + CELParserRULE_literal = 15 +) + +// IStartContext is an interface to support dynamic dispatch. +type IStartContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetE returns the e rule contexts. + GetE() IExprContext + + + // SetE sets the e rule contexts. + SetE(IExprContext) + + + // Getter signatures + EOF() antlr.TerminalNode + Expr() IExprContext + + // IsStartContext differentiates from other interfaces. 
+ IsStartContext() +} + +type StartContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IExprContext +} + +func NewEmptyStartContext() *StartContext { + var p = new(StartContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_start + return p +} + +func InitEmptyStartContext(p *StartContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_start +} + +func (*StartContext) IsStartContext() {} + +func NewStartContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *StartContext { + var p = new(StartContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_start + + return p +} + +func (s *StartContext) GetParser() antlr.Parser { return s.parser } + +func (s *StartContext) GetE() IExprContext { return s.e } + + +func (s *StartContext) SetE(v IExprContext) { s.e = v } + + +func (s *StartContext) EOF() antlr.TerminalNode { + return s.GetToken(CELParserEOF, 0) +} + +func (s *StartContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *StartContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *StartContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *StartContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterStart(s) + } +} + +func (s *StartContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitStart(s) + } +} + +func (s *StartContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitStart(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) Start_() (localctx IStartContext) { + localctx = NewStartContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 0, CELParserRULE_start) + p.EnterOuterAlt(localctx, 1) + { + p.SetState(32) + + var _x = p.Expr() + + + localctx.(*StartContext).e = _x + } + { + p.SetState(33) + p.Match(CELParserEOF) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IExprContext is an interface to support dynamic dispatch. +type IExprContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOp returns the op token. + GetOp() antlr.Token + + + // SetOp sets the op token. + SetOp(antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IConditionalOrContext + + // GetE1 returns the e1 rule contexts. + GetE1() IConditionalOrContext + + // GetE2 returns the e2 rule contexts. + GetE2() IExprContext + + + // SetE sets the e rule contexts. + SetE(IConditionalOrContext) + + // SetE1 sets the e1 rule contexts. + SetE1(IConditionalOrContext) + + // SetE2 sets the e2 rule contexts. 
+ SetE2(IExprContext) + + + // Getter signatures + AllConditionalOr() []IConditionalOrContext + ConditionalOr(i int) IConditionalOrContext + COLON() antlr.TerminalNode + QUESTIONMARK() antlr.TerminalNode + Expr() IExprContext + + // IsExprContext differentiates from other interfaces. + IsExprContext() +} + +type ExprContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IConditionalOrContext + op antlr.Token + e1 IConditionalOrContext + e2 IExprContext +} + +func NewEmptyExprContext() *ExprContext { + var p = new(ExprContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_expr + return p +} + +func InitEmptyExprContext(p *ExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_expr +} + +func (*ExprContext) IsExprContext() {} + +func NewExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprContext { + var p = new(ExprContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_expr + + return p +} + +func (s *ExprContext) GetParser() antlr.Parser { return s.parser } + +func (s *ExprContext) GetOp() antlr.Token { return s.op } + + +func (s *ExprContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *ExprContext) GetE() IConditionalOrContext { return s.e } + +func (s *ExprContext) GetE1() IConditionalOrContext { return s.e1 } + +func (s *ExprContext) GetE2() IExprContext { return s.e2 } + + +func (s *ExprContext) SetE(v IConditionalOrContext) { s.e = v } + +func (s *ExprContext) SetE1(v IConditionalOrContext) { s.e1 = v } + +func (s *ExprContext) SetE2(v IExprContext) { s.e2 = v } + + +func (s *ExprContext) AllConditionalOr() []IConditionalOrContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IConditionalOrContext); ok { + len++ + } + } + + tst := make([]IConditionalOrContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IConditionalOrContext); ok { + tst[i] = t.(IConditionalOrContext) + i++ + } + } + + return tst +} + +func (s *ExprContext) ConditionalOr(i int) IConditionalOrContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IConditionalOrContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IConditionalOrContext) +} + +func (s *ExprContext) COLON() antlr.TerminalNode { + return s.GetToken(CELParserCOLON, 0) +} + +func (s *ExprContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + +func (s *ExprContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *ExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterExpr(s) + } +} + +func (s *ExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitExpr(s) + } +} + +func (s *ExprContext) Accept(visitor 
antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitExpr(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) Expr() (localctx IExprContext) { + localctx = NewExprContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 2, CELParserRULE_expr) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(35) + + var _x = p.ConditionalOr() + + + localctx.(*ExprContext).e = _x + } + p.SetState(41) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(36) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*ExprContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(37) + + var _x = p.ConditionalOr() + + + localctx.(*ExprContext).e1 = _x + } + { + p.SetState(38) + p.Match(CELParserCOLON) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(39) + + var _x = p.Expr() + + + localctx.(*ExprContext).e2 = _x + } + + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IConditionalOrContext is an interface to support dynamic dispatch. +type IConditionalOrContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS9 returns the s9 token. + GetS9() antlr.Token + + + // SetS9 sets the s9 token. + SetS9(antlr.Token) + + + // GetOps returns the ops token list. + GetOps() []antlr.Token + + + // SetOps sets the ops token list. + SetOps([]antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IConditionalAndContext + + // Get_conditionalAnd returns the _conditionalAnd rule contexts. + Get_conditionalAnd() IConditionalAndContext + + + // SetE sets the e rule contexts. + SetE(IConditionalAndContext) + + // Set_conditionalAnd sets the _conditionalAnd rule contexts. + Set_conditionalAnd(IConditionalAndContext) + + + // GetE1 returns the e1 rule context list. + GetE1() []IConditionalAndContext + + + // SetE1 sets the e1 rule context list. + SetE1([]IConditionalAndContext) + + + // Getter signatures + AllConditionalAnd() []IConditionalAndContext + ConditionalAnd(i int) IConditionalAndContext + AllLOGICAL_OR() []antlr.TerminalNode + LOGICAL_OR(i int) antlr.TerminalNode + + // IsConditionalOrContext differentiates from other interfaces. 
+ IsConditionalOrContext() +} + +type ConditionalOrContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IConditionalAndContext + s9 antlr.Token + ops []antlr.Token + _conditionalAnd IConditionalAndContext + e1 []IConditionalAndContext +} + +func NewEmptyConditionalOrContext() *ConditionalOrContext { + var p = new(ConditionalOrContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalOr + return p +} + +func InitEmptyConditionalOrContext(p *ConditionalOrContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalOr +} + +func (*ConditionalOrContext) IsConditionalOrContext() {} + +func NewConditionalOrContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalOrContext { + var p = new(ConditionalOrContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_conditionalOr + + return p +} + +func (s *ConditionalOrContext) GetParser() antlr.Parser { return s.parser } + +func (s *ConditionalOrContext) GetS9() antlr.Token { return s.s9 } + + +func (s *ConditionalOrContext) SetS9(v antlr.Token) { s.s9 = v } + + +func (s *ConditionalOrContext) GetOps() []antlr.Token { return s.ops } + + +func (s *ConditionalOrContext) SetOps(v []antlr.Token) { s.ops = v } + + +func (s *ConditionalOrContext) GetE() IConditionalAndContext { return s.e } + +func (s *ConditionalOrContext) Get_conditionalAnd() IConditionalAndContext { return s._conditionalAnd } + + +func (s *ConditionalOrContext) SetE(v IConditionalAndContext) { s.e = v } + +func (s *ConditionalOrContext) Set_conditionalAnd(v IConditionalAndContext) { s._conditionalAnd = v } + + +func (s *ConditionalOrContext) GetE1() []IConditionalAndContext { return s.e1 } + + +func (s *ConditionalOrContext) SetE1(v []IConditionalAndContext) { s.e1 = v } + + +func (s *ConditionalOrContext) AllConditionalAnd() []IConditionalAndContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IConditionalAndContext); ok { + len++ + } + } + + tst := make([]IConditionalAndContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IConditionalAndContext); ok { + tst[i] = t.(IConditionalAndContext) + i++ + } + } + + return tst +} + +func (s *ConditionalOrContext) ConditionalAnd(i int) IConditionalAndContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IConditionalAndContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IConditionalAndContext) +} + +func (s *ConditionalOrContext) AllLOGICAL_OR() []antlr.TerminalNode { + return s.GetTokens(CELParserLOGICAL_OR) +} + +func (s *ConditionalOrContext) LOGICAL_OR(i int) antlr.TerminalNode { + return s.GetToken(CELParserLOGICAL_OR, i) +} + +func (s *ConditionalOrContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ConditionalOrContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ConditionalOrContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterConditionalOr(s) + } +} + +func (s *ConditionalOrContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + 
listenerT.ExitConditionalOr(s) + } +} + +func (s *ConditionalOrContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitConditionalOr(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ConditionalOr() (localctx IConditionalOrContext) { + localctx = NewConditionalOrContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 4, CELParserRULE_conditionalOr) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(43) + + var _x = p.ConditionalAnd() + + + localctx.(*ConditionalOrContext).e = _x + } + p.SetState(48) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserLOGICAL_OR { + { + p.SetState(44) + + var _m = p.Match(CELParserLOGICAL_OR) + + localctx.(*ConditionalOrContext).s9 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*ConditionalOrContext).ops = append(localctx.(*ConditionalOrContext).ops, localctx.(*ConditionalOrContext).s9) + { + p.SetState(45) + + var _x = p.ConditionalAnd() + + + localctx.(*ConditionalOrContext)._conditionalAnd = _x + } + localctx.(*ConditionalOrContext).e1 = append(localctx.(*ConditionalOrContext).e1, localctx.(*ConditionalOrContext)._conditionalAnd) + + + p.SetState(50) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IConditionalAndContext is an interface to support dynamic dispatch. +type IConditionalAndContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS8 returns the s8 token. + GetS8() antlr.Token + + + // SetS8 sets the s8 token. + SetS8(antlr.Token) + + + // GetOps returns the ops token list. + GetOps() []antlr.Token + + + // SetOps sets the ops token list. + SetOps([]antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IRelationContext + + // Get_relation returns the _relation rule contexts. + Get_relation() IRelationContext + + + // SetE sets the e rule contexts. + SetE(IRelationContext) + + // Set_relation sets the _relation rule contexts. + Set_relation(IRelationContext) + + + // GetE1 returns the e1 rule context list. + GetE1() []IRelationContext + + + // SetE1 sets the e1 rule context list. + SetE1([]IRelationContext) + + + // Getter signatures + AllRelation() []IRelationContext + Relation(i int) IRelationContext + AllLOGICAL_AND() []antlr.TerminalNode + LOGICAL_AND(i int) antlr.TerminalNode + + // IsConditionalAndContext differentiates from other interfaces. 
+ IsConditionalAndContext() +} + +type ConditionalAndContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + e IRelationContext + s8 antlr.Token + ops []antlr.Token + _relation IRelationContext + e1 []IRelationContext +} + +func NewEmptyConditionalAndContext() *ConditionalAndContext { + var p = new(ConditionalAndContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalAnd + return p +} + +func InitEmptyConditionalAndContext(p *ConditionalAndContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_conditionalAnd +} + +func (*ConditionalAndContext) IsConditionalAndContext() {} + +func NewConditionalAndContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ConditionalAndContext { + var p = new(ConditionalAndContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_conditionalAnd + + return p +} + +func (s *ConditionalAndContext) GetParser() antlr.Parser { return s.parser } + +func (s *ConditionalAndContext) GetS8() antlr.Token { return s.s8 } + + +func (s *ConditionalAndContext) SetS8(v antlr.Token) { s.s8 = v } + + +func (s *ConditionalAndContext) GetOps() []antlr.Token { return s.ops } + + +func (s *ConditionalAndContext) SetOps(v []antlr.Token) { s.ops = v } + + +func (s *ConditionalAndContext) GetE() IRelationContext { return s.e } + +func (s *ConditionalAndContext) Get_relation() IRelationContext { return s._relation } + + +func (s *ConditionalAndContext) SetE(v IRelationContext) { s.e = v } + +func (s *ConditionalAndContext) Set_relation(v IRelationContext) { s._relation = v } + + +func (s *ConditionalAndContext) GetE1() []IRelationContext { return s.e1 } + + +func (s *ConditionalAndContext) SetE1(v []IRelationContext) { s.e1 = v } + + +func (s *ConditionalAndContext) AllRelation() []IRelationContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IRelationContext); ok { + len++ + } + } + + tst := make([]IRelationContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IRelationContext); ok { + tst[i] = t.(IRelationContext) + i++ + } + } + + return tst +} + +func (s *ConditionalAndContext) Relation(i int) IRelationContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IRelationContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IRelationContext) +} + +func (s *ConditionalAndContext) AllLOGICAL_AND() []antlr.TerminalNode { + return s.GetTokens(CELParserLOGICAL_AND) +} + +func (s *ConditionalAndContext) LOGICAL_AND(i int) antlr.TerminalNode { + return s.GetToken(CELParserLOGICAL_AND, i) +} + +func (s *ConditionalAndContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ConditionalAndContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ConditionalAndContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterConditionalAnd(s) + } +} + +func (s *ConditionalAndContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitConditionalAnd(s) + } +} + +func (s *ConditionalAndContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { 
+ switch t := visitor.(type) { + case CELVisitor: + return t.VisitConditionalAnd(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ConditionalAnd() (localctx IConditionalAndContext) { + localctx = NewConditionalAndContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 6, CELParserRULE_conditionalAnd) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(51) + + var _x = p.relation(0) + + localctx.(*ConditionalAndContext).e = _x + } + p.SetState(56) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserLOGICAL_AND { + { + p.SetState(52) + + var _m = p.Match(CELParserLOGICAL_AND) + + localctx.(*ConditionalAndContext).s8 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*ConditionalAndContext).ops = append(localctx.(*ConditionalAndContext).ops, localctx.(*ConditionalAndContext).s8) + { + p.SetState(53) + + var _x = p.relation(0) + + localctx.(*ConditionalAndContext)._relation = _x + } + localctx.(*ConditionalAndContext).e1 = append(localctx.(*ConditionalAndContext).e1, localctx.(*ConditionalAndContext)._relation) + + + p.SetState(58) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IRelationContext is an interface to support dynamic dispatch. +type IRelationContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOp returns the op token. + GetOp() antlr.Token + + + // SetOp sets the op token. + SetOp(antlr.Token) + + + // Getter signatures + Calc() ICalcContext + AllRelation() []IRelationContext + Relation(i int) IRelationContext + LESS() antlr.TerminalNode + LESS_EQUALS() antlr.TerminalNode + GREATER_EQUALS() antlr.TerminalNode + GREATER() antlr.TerminalNode + EQUALS() antlr.TerminalNode + NOT_EQUALS() antlr.TerminalNode + IN() antlr.TerminalNode + + // IsRelationContext differentiates from other interfaces. 
+ IsRelationContext() +} + +type RelationContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + op antlr.Token +} + +func NewEmptyRelationContext() *RelationContext { + var p = new(RelationContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_relation + return p +} + +func InitEmptyRelationContext(p *RelationContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_relation +} + +func (*RelationContext) IsRelationContext() {} + +func NewRelationContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *RelationContext { + var p = new(RelationContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_relation + + return p +} + +func (s *RelationContext) GetParser() antlr.Parser { return s.parser } + +func (s *RelationContext) GetOp() antlr.Token { return s.op } + + +func (s *RelationContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *RelationContext) Calc() ICalcContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICalcContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(ICalcContext) +} + +func (s *RelationContext) AllRelation() []IRelationContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IRelationContext); ok { + len++ + } + } + + tst := make([]IRelationContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IRelationContext); ok { + tst[i] = t.(IRelationContext) + i++ + } + } + + return tst +} + +func (s *RelationContext) Relation(i int) IRelationContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IRelationContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IRelationContext) +} + +func (s *RelationContext) LESS() antlr.TerminalNode { + return s.GetToken(CELParserLESS, 0) +} + +func (s *RelationContext) LESS_EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserLESS_EQUALS, 0) +} + +func (s *RelationContext) GREATER_EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserGREATER_EQUALS, 0) +} + +func (s *RelationContext) GREATER() antlr.TerminalNode { + return s.GetToken(CELParserGREATER, 0) +} + +func (s *RelationContext) EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserEQUALS, 0) +} + +func (s *RelationContext) NOT_EQUALS() antlr.TerminalNode { + return s.GetToken(CELParserNOT_EQUALS, 0) +} + +func (s *RelationContext) IN() antlr.TerminalNode { + return s.GetToken(CELParserIN, 0) +} + +func (s *RelationContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *RelationContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *RelationContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterRelation(s) + } +} + +func (s *RelationContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitRelation(s) + } +} + +func (s *RelationContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitRelation(s) + + default: + return t.VisitChildren(s) + } +} + + 
+ + + +func (p *CELParser) Relation() (localctx IRelationContext) { + return p.relation(0) +} + +func (p *CELParser) relation(_p int) (localctx IRelationContext) { + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + + _parentState := p.GetState() + localctx = NewRelationContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx IRelationContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning. + _startState := 8 + p.EnterRecursionRule(localctx, 8, CELParserRULE_relation, _p) + var _la int + + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(60) + p.calc(0) + } + + p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) + p.SetState(67) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + if p.GetParseListeners() != nil { + p.TriggerExitRuleEvent() + } + _prevctx = localctx + localctx = NewRelationContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_relation) + p.SetState(62) + + if !(p.Precpred(p.GetParserRuleContext(), 1)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit + } + { + p.SetState(63) + + var _lt = p.GetTokenStream().LT(1) + + localctx.(*RelationContext).op = _lt + + _la = p.GetTokenStream().LA(1) + + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 254) != 0)) { + var _ri = p.GetErrorHandler().RecoverInline(p) + + localctx.(*RelationContext).op = _ri + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(64) + p.relation(2) + } + + + } + p.SetState(69) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 3, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// ICalcContext is an interface to support dynamic dispatch. +type ICalcContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOp returns the op token. + GetOp() antlr.Token + + + // SetOp sets the op token. + SetOp(antlr.Token) + + + // Getter signatures + Unary() IUnaryContext + AllCalc() []ICalcContext + Calc(i int) ICalcContext + STAR() antlr.TerminalNode + SLASH() antlr.TerminalNode + PERCENT() antlr.TerminalNode + PLUS() antlr.TerminalNode + MINUS() antlr.TerminalNode + + // IsCalcContext differentiates from other interfaces. 
+ IsCalcContext() +} + +type CalcContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + op antlr.Token +} + +func NewEmptyCalcContext() *CalcContext { + var p = new(CalcContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_calc + return p +} + +func InitEmptyCalcContext(p *CalcContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_calc +} + +func (*CalcContext) IsCalcContext() {} + +func NewCalcContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CalcContext { + var p = new(CalcContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_calc + + return p +} + +func (s *CalcContext) GetParser() antlr.Parser { return s.parser } + +func (s *CalcContext) GetOp() antlr.Token { return s.op } + + +func (s *CalcContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CalcContext) Unary() IUnaryContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IUnaryContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IUnaryContext) +} + +func (s *CalcContext) AllCalc() []ICalcContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(ICalcContext); ok { + len++ + } + } + + tst := make([]ICalcContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(ICalcContext); ok { + tst[i] = t.(ICalcContext) + i++ + } + } + + return tst +} + +func (s *CalcContext) Calc(i int) ICalcContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ICalcContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(ICalcContext) +} + +func (s *CalcContext) STAR() antlr.TerminalNode { + return s.GetToken(CELParserSTAR, 0) +} + +func (s *CalcContext) SLASH() antlr.TerminalNode { + return s.GetToken(CELParserSLASH, 0) +} + +func (s *CalcContext) PERCENT() antlr.TerminalNode { + return s.GetToken(CELParserPERCENT, 0) +} + +func (s *CalcContext) PLUS() antlr.TerminalNode { + return s.GetToken(CELParserPLUS, 0) +} + +func (s *CalcContext) MINUS() antlr.TerminalNode { + return s.GetToken(CELParserMINUS, 0) +} + +func (s *CalcContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CalcContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *CalcContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCalc(s) + } +} + +func (s *CalcContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCalc(s) + } +} + +func (s *CalcContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCalc(s) + + default: + return t.VisitChildren(s) + } +} + + + + + +func (p *CELParser) Calc() (localctx ICalcContext) { + return p.calc(0) +} + +func (p *CELParser) calc(_p int) (localctx ICalcContext) { + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + + _parentState := p.GetState() + localctx = NewCalcContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx ICalcContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To 
prevent unused variable warning. + _startState := 10 + p.EnterRecursionRule(localctx, 10, CELParserRULE_calc, _p) + var _la int + + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(71) + p.Unary() + } + + p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) + p.SetState(81) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + if p.GetParseListeners() != nil { + p.TriggerExitRuleEvent() + } + _prevctx = localctx + p.SetState(79) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 4, p.GetParserRuleContext()) { + case 1: + localctx = NewCalcContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) + p.SetState(73) + + if !(p.Precpred(p.GetParserRuleContext(), 2)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit + } + { + p.SetState(74) + + var _lt = p.GetTokenStream().LT(1) + + localctx.(*CalcContext).op = _lt + + _la = p.GetTokenStream().LA(1) + + if !(((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 58720256) != 0)) { + var _ri = p.GetErrorHandler().RecoverInline(p) + + localctx.(*CalcContext).op = _ri + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(75) + p.calc(3) + } + + + case 2: + localctx = NewCalcContext(p, _parentctx, _parentState) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_calc) + p.SetState(76) + + if !(p.Precpred(p.GetParserRuleContext(), 1)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit + } + { + p.SetState(77) + + var _lt = p.GetTokenStream().LT(1) + + localctx.(*CalcContext).op = _lt + + _la = p.GetTokenStream().LA(1) + + if !(_la == CELParserMINUS || _la == CELParserPLUS) { + var _ri = p.GetErrorHandler().RecoverInline(p) + + localctx.(*CalcContext).op = _ri + } else { + p.GetErrorHandler().ReportMatch(p) + p.Consume() + } + } + { + p.SetState(78) + p.calc(2) + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + } + p.SetState(83) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 5, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IUnaryContext is an interface to support dynamic dispatch. +type IUnaryContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsUnaryContext differentiates from other interfaces. 
+ IsUnaryContext() +} + +type UnaryContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyUnaryContext() *UnaryContext { + var p = new(UnaryContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_unary + return p +} + +func InitEmptyUnaryContext(p *UnaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_unary +} + +func (*UnaryContext) IsUnaryContext() {} + +func NewUnaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *UnaryContext { + var p = new(UnaryContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_unary + + return p +} + +func (s *UnaryContext) GetParser() antlr.Parser { return s.parser } + +func (s *UnaryContext) CopyAll(ctx *UnaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *UnaryContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *UnaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + +type LogicalNotContext struct { + UnaryContext + s19 antlr.Token + ops []antlr.Token +} + +func NewLogicalNotContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LogicalNotContext { + var p = new(LogicalNotContext) + + InitEmptyUnaryContext(&p.UnaryContext) + p.parser = parser + p.CopyAll(ctx.(*UnaryContext)) + + return p +} + + +func (s *LogicalNotContext) GetS19() antlr.Token { return s.s19 } + + +func (s *LogicalNotContext) SetS19(v antlr.Token) { s.s19 = v } + + +func (s *LogicalNotContext) GetOps() []antlr.Token { return s.ops } + + +func (s *LogicalNotContext) SetOps(v []antlr.Token) { s.ops = v } + +func (s *LogicalNotContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *LogicalNotContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *LogicalNotContext) AllEXCLAM() []antlr.TerminalNode { + return s.GetTokens(CELParserEXCLAM) +} + +func (s *LogicalNotContext) EXCLAM(i int) antlr.TerminalNode { + return s.GetToken(CELParserEXCLAM, i) +} + + +func (s *LogicalNotContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterLogicalNot(s) + } +} + +func (s *LogicalNotContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitLogicalNot(s) + } +} + +func (s *LogicalNotContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitLogicalNot(s) + + default: + return t.VisitChildren(s) + } +} + + +type MemberExprContext struct { + UnaryContext +} + +func NewMemberExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberExprContext { + var p = new(MemberExprContext) + + InitEmptyUnaryContext(&p.UnaryContext) + p.parser = parser + p.CopyAll(ctx.(*UnaryContext)) + + return p +} + +func (s *MemberExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MemberExprContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { 
+ return nil + } + + return t.(IMemberContext) +} + + +func (s *MemberExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterMemberExpr(s) + } +} + +func (s *MemberExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitMemberExpr(s) + } +} + +func (s *MemberExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitMemberExpr(s) + + default: + return t.VisitChildren(s) + } +} + + +type NegateContext struct { + UnaryContext + s18 antlr.Token + ops []antlr.Token +} + +func NewNegateContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NegateContext { + var p = new(NegateContext) + + InitEmptyUnaryContext(&p.UnaryContext) + p.parser = parser + p.CopyAll(ctx.(*UnaryContext)) + + return p +} + + +func (s *NegateContext) GetS18() antlr.Token { return s.s18 } + + +func (s *NegateContext) SetS18(v antlr.Token) { s.s18 = v } + + +func (s *NegateContext) GetOps() []antlr.Token { return s.ops } + + +func (s *NegateContext) SetOps(v []antlr.Token) { s.ops = v } + +func (s *NegateContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *NegateContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *NegateContext) AllMINUS() []antlr.TerminalNode { + return s.GetTokens(CELParserMINUS) +} + +func (s *NegateContext) MINUS(i int) antlr.TerminalNode { + return s.GetToken(CELParserMINUS, i) +} + + +func (s *NegateContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterNegate(s) + } +} + +func (s *NegateContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitNegate(s) + } +} + +func (s *NegateContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitNegate(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Unary() (localctx IUnaryContext) { + localctx = NewUnaryContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 12, CELParserRULE_unary) + var _la int + + var _alt int + + p.SetState(97) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 8, p.GetParserRuleContext()) { + case 1: + localctx = NewMemberExprContext(p, localctx) + p.EnterOuterAlt(localctx, 1) + { + p.SetState(84) + p.member(0) + } + + + case 2: + localctx = NewLogicalNotContext(p, localctx) + p.EnterOuterAlt(localctx, 2) + p.SetState(86) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for ok := true; ok; ok = _la == CELParserEXCLAM { + { + p.SetState(85) + + var _m = p.Match(CELParserEXCLAM) + + localctx.(*LogicalNotContext).s19 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*LogicalNotContext).ops = append(localctx.(*LogicalNotContext).ops, localctx.(*LogicalNotContext).s19) + + + p.SetState(88) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(90) + p.member(0) + } + + + case 3: + 
localctx = NewNegateContext(p, localctx) + p.EnterOuterAlt(localctx, 3) + p.SetState(92) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = 1 + for ok := true; ok; ok = _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + switch _alt { + case 1: + { + p.SetState(91) + + var _m = p.Match(CELParserMINUS) + + localctx.(*NegateContext).s18 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*NegateContext).ops = append(localctx.(*NegateContext).ops, localctx.(*NegateContext).s18) + + + + + default: + p.SetError(antlr.NewNoViableAltException(p, nil, nil, nil, nil, nil)) + goto errorExit + } + + p.SetState(94) + p.GetErrorHandler().Sync(p) + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 7, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + { + p.SetState(96) + p.member(0) + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IMemberContext is an interface to support dynamic dispatch. +type IMemberContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsMemberContext differentiates from other interfaces. + IsMemberContext() +} + +type MemberContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyMemberContext() *MemberContext { + var p = new(MemberContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_member + return p +} + +func InitEmptyMemberContext(p *MemberContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_member +} + +func (*MemberContext) IsMemberContext() {} + +func NewMemberContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MemberContext { + var p = new(MemberContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_member + + return p +} + +func (s *MemberContext) GetParser() antlr.Parser { return s.parser } + +func (s *MemberContext) CopyAll(ctx *MemberContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *MemberContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MemberContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + + +type MemberCallContext struct { + MemberContext + op antlr.Token + id antlr.Token + open antlr.Token + args IExprListContext +} + +func NewMemberCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *MemberCallContext { + var p = new(MemberCallContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + + +func (s *MemberCallContext) GetOp() antlr.Token { return s.op } + +func (s *MemberCallContext) GetId() antlr.Token { return s.id } + +func (s *MemberCallContext) GetOpen() antlr.Token { return s.open } + + +func (s *MemberCallContext) SetOp(v antlr.Token) { s.op = v } + +func (s *MemberCallContext) SetId(v antlr.Token) { s.id = v } + +func (s *MemberCallContext) SetOpen(v antlr.Token) { s.open = v } + + +func (s *MemberCallContext) 
GetArgs() IExprListContext { return s.args } + + +func (s *MemberCallContext) SetArgs(v IExprListContext) { s.args = v } + +func (s *MemberCallContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MemberCallContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *MemberCallContext) RPAREN() antlr.TerminalNode { + return s.GetToken(CELParserRPAREN, 0) +} + +func (s *MemberCallContext) DOT() antlr.TerminalNode { + return s.GetToken(CELParserDOT, 0) +} + +func (s *MemberCallContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *MemberCallContext) LPAREN() antlr.TerminalNode { + return s.GetToken(CELParserLPAREN, 0) +} + +func (s *MemberCallContext) ExprList() IExprListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprListContext) +} + + +func (s *MemberCallContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterMemberCall(s) + } +} + +func (s *MemberCallContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitMemberCall(s) + } +} + +func (s *MemberCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitMemberCall(s) + + default: + return t.VisitChildren(s) + } +} + + +type SelectContext struct { + MemberContext + op antlr.Token + opt antlr.Token + id antlr.Token +} + +func NewSelectContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SelectContext { + var p = new(SelectContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + + +func (s *SelectContext) GetOp() antlr.Token { return s.op } + +func (s *SelectContext) GetOpt() antlr.Token { return s.opt } + +func (s *SelectContext) GetId() antlr.Token { return s.id } + + +func (s *SelectContext) SetOp(v antlr.Token) { s.op = v } + +func (s *SelectContext) SetOpt(v antlr.Token) { s.opt = v } + +func (s *SelectContext) SetId(v antlr.Token) { s.id = v } + +func (s *SelectContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *SelectContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *SelectContext) DOT() antlr.TerminalNode { + return s.GetToken(CELParserDOT, 0) +} + +func (s *SelectContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *SelectContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + + +func (s *SelectContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterSelect(s) + } +} + +func (s *SelectContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitSelect(s) + } +} + +func (s *SelectContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case 
CELVisitor: + return t.VisitSelect(s) + + default: + return t.VisitChildren(s) + } +} + + +type PrimaryExprContext struct { + MemberContext +} + +func NewPrimaryExprContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *PrimaryExprContext { + var p = new(PrimaryExprContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + +func (s *PrimaryExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *PrimaryExprContext) Primary() IPrimaryContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IPrimaryContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IPrimaryContext) +} + + +func (s *PrimaryExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterPrimaryExpr(s) + } +} + +func (s *PrimaryExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitPrimaryExpr(s) + } +} + +func (s *PrimaryExprContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitPrimaryExpr(s) + + default: + return t.VisitChildren(s) + } +} + + +type IndexContext struct { + MemberContext + op antlr.Token + opt antlr.Token + index IExprContext +} + +func NewIndexContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IndexContext { + var p = new(IndexContext) + + InitEmptyMemberContext(&p.MemberContext) + p.parser = parser + p.CopyAll(ctx.(*MemberContext)) + + return p +} + + +func (s *IndexContext) GetOp() antlr.Token { return s.op } + +func (s *IndexContext) GetOpt() antlr.Token { return s.opt } + + +func (s *IndexContext) SetOp(v antlr.Token) { s.op = v } + +func (s *IndexContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *IndexContext) GetIndex() IExprContext { return s.index } + + +func (s *IndexContext) SetIndex(v IExprContext) { s.index = v } + +func (s *IndexContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *IndexContext) Member() IMemberContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMemberContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMemberContext) +} + +func (s *IndexContext) RPRACKET() antlr.TerminalNode { + return s.GetToken(CELParserRPRACKET, 0) +} + +func (s *IndexContext) LBRACKET() antlr.TerminalNode { + return s.GetToken(CELParserLBRACKET, 0) +} + +func (s *IndexContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *IndexContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + + +func (s *IndexContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterIndex(s) + } +} + +func (s *IndexContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitIndex(s) + } +} + +func (s *IndexContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitIndex(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Member() (localctx 
IMemberContext) { + return p.member(0) +} + +func (p *CELParser) member(_p int) (localctx IMemberContext) { + var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext() + + _parentState := p.GetState() + localctx = NewMemberContext(p, p.GetParserRuleContext(), _parentState) + var _prevctx IMemberContext = localctx + var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning. + _startState := 14 + p.EnterRecursionRule(localctx, 14, CELParserRULE_member, _p) + var _la int + + var _alt int + + p.EnterOuterAlt(localctx, 1) + localctx = NewPrimaryExprContext(p, localctx) + p.SetParserRuleContext(localctx) + _prevctx = localctx + + { + p.SetState(100) + p.Primary() + } + + p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1)) + p.SetState(126) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + if p.GetParseListeners() != nil { + p.TriggerExitRuleEvent() + } + _prevctx = localctx + p.SetState(124) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 12, p.GetParserRuleContext()) { + case 1: + localctx = NewSelectContext(p, NewMemberContext(p, _parentctx, _parentState)) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) + p.SetState(102) + + if !(p.Precpred(p.GetParserRuleContext(), 3)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 3)", "")) + goto errorExit + } + { + p.SetState(103) + + var _m = p.Match(CELParserDOT) + + localctx.(*SelectContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(105) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(104) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*SelectContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(107) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*SelectContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 2: + localctx = NewMemberCallContext(p, NewMemberContext(p, _parentctx, _parentState)) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) + p.SetState(108) + + if !(p.Precpred(p.GetParserRuleContext(), 2)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 2)", "")) + goto errorExit + } + { + p.SetState(109) + + var _m = p.Match(CELParserDOT) + + localctx.(*MemberCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(110) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*MemberCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(111) + + var _m = p.Match(CELParserLPAREN) + + localctx.(*MemberCallContext).open = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(113) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { + { 
+ p.SetState(112) + + var _x = p.ExprList() + + + localctx.(*MemberCallContext).args = _x + } + + } + { + p.SetState(115) + p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 3: + localctx = NewIndexContext(p, NewMemberContext(p, _parentctx, _parentState)) + p.PushNewRecursionContext(localctx, _startState, CELParserRULE_member) + p.SetState(116) + + if !(p.Precpred(p.GetParserRuleContext(), 1)) { + p.SetError(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", "")) + goto errorExit + } + { + p.SetState(117) + + var _m = p.Match(CELParserLBRACKET) + + localctx.(*IndexContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(119) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(118) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*IndexContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(121) + + var _x = p.Expr() + + + localctx.(*IndexContext).index = _x + } + { + p.SetState(122) + p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + } + p.SetState(128) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 13, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + + errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.UnrollRecursionContexts(_parentctx) + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IPrimaryContext is an interface to support dynamic dispatch. +type IPrimaryContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsPrimaryContext differentiates from other interfaces. 
+ IsPrimaryContext() +} + +type PrimaryContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyPrimaryContext() *PrimaryContext { + var p = new(PrimaryContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_primary + return p +} + +func InitEmptyPrimaryContext(p *PrimaryContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_primary +} + +func (*PrimaryContext) IsPrimaryContext() {} + +func NewPrimaryContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *PrimaryContext { + var p = new(PrimaryContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_primary + + return p +} + +func (s *PrimaryContext) GetParser() antlr.Parser { return s.parser } + +func (s *PrimaryContext) CopyAll(ctx *PrimaryContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *PrimaryContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *PrimaryContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + +type CreateListContext struct { + PrimaryContext + op antlr.Token + elems IListInitContext +} + +func NewCreateListContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateListContext { + var p = new(CreateListContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *CreateListContext) GetOp() antlr.Token { return s.op } + + +func (s *CreateListContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CreateListContext) GetElems() IListInitContext { return s.elems } + + +func (s *CreateListContext) SetElems(v IListInitContext) { s.elems = v } + +func (s *CreateListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CreateListContext) RPRACKET() antlr.TerminalNode { + return s.GetToken(CELParserRPRACKET, 0) +} + +func (s *CreateListContext) LBRACKET() antlr.TerminalNode { + return s.GetToken(CELParserLBRACKET, 0) +} + +func (s *CreateListContext) COMMA() antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, 0) +} + +func (s *CreateListContext) ListInit() IListInitContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IListInitContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IListInitContext) +} + + +func (s *CreateListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCreateList(s) + } +} + +func (s *CreateListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCreateList(s) + } +} + +func (s *CreateListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCreateList(s) + + default: + return t.VisitChildren(s) + } +} + + +type CreateStructContext struct { + PrimaryContext + op antlr.Token + entries IMapInitializerListContext +} + +func NewCreateStructContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateStructContext { + var p = new(CreateStructContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *CreateStructContext) GetOp() antlr.Token { return s.op 
} + + +func (s *CreateStructContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CreateStructContext) GetEntries() IMapInitializerListContext { return s.entries } + + +func (s *CreateStructContext) SetEntries(v IMapInitializerListContext) { s.entries = v } + +func (s *CreateStructContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CreateStructContext) RBRACE() antlr.TerminalNode { + return s.GetToken(CELParserRBRACE, 0) +} + +func (s *CreateStructContext) LBRACE() antlr.TerminalNode { + return s.GetToken(CELParserLBRACE, 0) +} + +func (s *CreateStructContext) COMMA() antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, 0) +} + +func (s *CreateStructContext) MapInitializerList() IMapInitializerListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IMapInitializerListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IMapInitializerListContext) +} + + +func (s *CreateStructContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCreateStruct(s) + } +} + +func (s *CreateStructContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCreateStruct(s) + } +} + +func (s *CreateStructContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCreateStruct(s) + + default: + return t.VisitChildren(s) + } +} + + +type ConstantLiteralContext struct { + PrimaryContext +} + +func NewConstantLiteralContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ConstantLiteralContext { + var p = new(ConstantLiteralContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + +func (s *ConstantLiteralContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ConstantLiteralContext) Literal() ILiteralContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(ILiteralContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(ILiteralContext) +} + + +func (s *ConstantLiteralContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterConstantLiteral(s) + } +} + +func (s *ConstantLiteralContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitConstantLiteral(s) + } +} + +func (s *ConstantLiteralContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitConstantLiteral(s) + + default: + return t.VisitChildren(s) + } +} + + +type NestedContext struct { + PrimaryContext + e IExprContext +} + +func NewNestedContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NestedContext { + var p = new(NestedContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *NestedContext) GetE() IExprContext { return s.e } + + +func (s *NestedContext) SetE(v IExprContext) { s.e = v } + +func (s *NestedContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *NestedContext) LPAREN() antlr.TerminalNode { + return s.GetToken(CELParserLPAREN, 0) +} + +func (s *NestedContext) RPAREN() antlr.TerminalNode { + return s.GetToken(CELParserRPAREN, 0) +} + +func (s 
*NestedContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + + +func (s *NestedContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterNested(s) + } +} + +func (s *NestedContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitNested(s) + } +} + +func (s *NestedContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitNested(s) + + default: + return t.VisitChildren(s) + } +} + + +type CreateMessageContext struct { + PrimaryContext + leadingDot antlr.Token + _IDENTIFIER antlr.Token + ids []antlr.Token + s16 antlr.Token + ops []antlr.Token + op antlr.Token + entries IFieldInitializerListContext +} + +func NewCreateMessageContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *CreateMessageContext { + var p = new(CreateMessageContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *CreateMessageContext) GetLeadingDot() antlr.Token { return s.leadingDot } + +func (s *CreateMessageContext) Get_IDENTIFIER() antlr.Token { return s._IDENTIFIER } + +func (s *CreateMessageContext) GetS16() antlr.Token { return s.s16 } + +func (s *CreateMessageContext) GetOp() antlr.Token { return s.op } + + +func (s *CreateMessageContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } + +func (s *CreateMessageContext) Set_IDENTIFIER(v antlr.Token) { s._IDENTIFIER = v } + +func (s *CreateMessageContext) SetS16(v antlr.Token) { s.s16 = v } + +func (s *CreateMessageContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *CreateMessageContext) GetIds() []antlr.Token { return s.ids } + +func (s *CreateMessageContext) GetOps() []antlr.Token { return s.ops } + + +func (s *CreateMessageContext) SetIds(v []antlr.Token) { s.ids = v } + +func (s *CreateMessageContext) SetOps(v []antlr.Token) { s.ops = v } + + +func (s *CreateMessageContext) GetEntries() IFieldInitializerListContext { return s.entries } + + +func (s *CreateMessageContext) SetEntries(v IFieldInitializerListContext) { s.entries = v } + +func (s *CreateMessageContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *CreateMessageContext) RBRACE() antlr.TerminalNode { + return s.GetToken(CELParserRBRACE, 0) +} + +func (s *CreateMessageContext) AllIDENTIFIER() []antlr.TerminalNode { + return s.GetTokens(CELParserIDENTIFIER) +} + +func (s *CreateMessageContext) IDENTIFIER(i int) antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, i) +} + +func (s *CreateMessageContext) LBRACE() antlr.TerminalNode { + return s.GetToken(CELParserLBRACE, 0) +} + +func (s *CreateMessageContext) COMMA() antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, 0) +} + +func (s *CreateMessageContext) AllDOT() []antlr.TerminalNode { + return s.GetTokens(CELParserDOT) +} + +func (s *CreateMessageContext) DOT(i int) antlr.TerminalNode { + return s.GetToken(CELParserDOT, i) +} + +func (s *CreateMessageContext) FieldInitializerList() IFieldInitializerListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IFieldInitializerListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return 
t.(IFieldInitializerListContext) +} + + +func (s *CreateMessageContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterCreateMessage(s) + } +} + +func (s *CreateMessageContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitCreateMessage(s) + } +} + +func (s *CreateMessageContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitCreateMessage(s) + + default: + return t.VisitChildren(s) + } +} + + +type IdentOrGlobalCallContext struct { + PrimaryContext + leadingDot antlr.Token + id antlr.Token + op antlr.Token + args IExprListContext +} + +func NewIdentOrGlobalCallContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IdentOrGlobalCallContext { + var p = new(IdentOrGlobalCallContext) + + InitEmptyPrimaryContext(&p.PrimaryContext) + p.parser = parser + p.CopyAll(ctx.(*PrimaryContext)) + + return p +} + + +func (s *IdentOrGlobalCallContext) GetLeadingDot() antlr.Token { return s.leadingDot } + +func (s *IdentOrGlobalCallContext) GetId() antlr.Token { return s.id } + +func (s *IdentOrGlobalCallContext) GetOp() antlr.Token { return s.op } + + +func (s *IdentOrGlobalCallContext) SetLeadingDot(v antlr.Token) { s.leadingDot = v } + +func (s *IdentOrGlobalCallContext) SetId(v antlr.Token) { s.id = v } + +func (s *IdentOrGlobalCallContext) SetOp(v antlr.Token) { s.op = v } + + +func (s *IdentOrGlobalCallContext) GetArgs() IExprListContext { return s.args } + + +func (s *IdentOrGlobalCallContext) SetArgs(v IExprListContext) { s.args = v } + +func (s *IdentOrGlobalCallContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *IdentOrGlobalCallContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *IdentOrGlobalCallContext) RPAREN() antlr.TerminalNode { + return s.GetToken(CELParserRPAREN, 0) +} + +func (s *IdentOrGlobalCallContext) DOT() antlr.TerminalNode { + return s.GetToken(CELParserDOT, 0) +} + +func (s *IdentOrGlobalCallContext) LPAREN() antlr.TerminalNode { + return s.GetToken(CELParserLPAREN, 0) +} + +func (s *IdentOrGlobalCallContext) ExprList() IExprListContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprListContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprListContext) +} + + +func (s *IdentOrGlobalCallContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterIdentOrGlobalCall(s) + } +} + +func (s *IdentOrGlobalCallContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitIdentOrGlobalCall(s) + } +} + +func (s *IdentOrGlobalCallContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitIdentOrGlobalCall(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Primary() (localctx IPrimaryContext) { + localctx = NewPrimaryContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 16, CELParserRULE_primary) + var _la int + + p.SetState(180) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 25, p.GetParserRuleContext()) { + case 1: + localctx = NewIdentOrGlobalCallContext(p, localctx) + 
p.EnterOuterAlt(localctx, 1) + p.SetState(130) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserDOT { + { + p.SetState(129) + + var _m = p.Match(CELParserDOT) + + localctx.(*IdentOrGlobalCallContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(132) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*IdentOrGlobalCallContext).id = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(138) + p.GetErrorHandler().Sync(p) + + + if p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 16, p.GetParserRuleContext()) == 1 { + { + p.SetState(133) + + var _m = p.Match(CELParserLPAREN) + + localctx.(*IdentOrGlobalCallContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(135) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135762105344) != 0) { + { + p.SetState(134) + + var _x = p.ExprList() + + + localctx.(*IdentOrGlobalCallContext).args = _x + } + + } + { + p.SetState(137) + p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } else if p.HasError() { // JIM + goto errorExit + } + + + case 2: + localctx = NewNestedContext(p, localctx) + p.EnterOuterAlt(localctx, 2) + { + p.SetState(140) + p.Match(CELParserLPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(141) + + var _x = p.Expr() + + + localctx.(*NestedContext).e = _x + } + { + p.SetState(142) + p.Match(CELParserRPAREN) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 3: + localctx = NewCreateListContext(p, localctx) + p.EnterOuterAlt(localctx, 3) + { + p.SetState(144) + + var _m = p.Match(CELParserLBRACKET) + + localctx.(*CreateListContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(146) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { + { + p.SetState(145) + + var _x = p.ListInit() + + + localctx.(*CreateListContext).elems = _x + } + + } + p.SetState(149) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserCOMMA { + { + p.SetState(148) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(151) + p.Match(CELParserRPRACKET) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 4: + localctx = NewCreateStructContext(p, localctx) + p.EnterOuterAlt(localctx, 4) + { + p.SetState(152) + + var _m = p.Match(CELParserLBRACE) + + localctx.(*CreateStructContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(154) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if ((int64(_la) & ^0x3f) == 0 && ((int64(1) << _la) & 135763153920) != 0) { + { + p.SetState(153) + + var _x = p.MapInitializerList() + + + localctx.(*CreateStructContext).entries = _x + } + + } + p.SetState(157) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto 
errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserCOMMA { + { + p.SetState(156) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(159) + p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 5: + localctx = NewCreateMessageContext(p, localctx) + p.EnterOuterAlt(localctx, 5) + p.SetState(161) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserDOT { + { + p.SetState(160) + + var _m = p.Match(CELParserDOT) + + localctx.(*CreateMessageContext).leadingDot = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(163) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) + p.SetState(168) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserDOT { + { + p.SetState(164) + + var _m = p.Match(CELParserDOT) + + localctx.(*CreateMessageContext).s16 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*CreateMessageContext).ops = append(localctx.(*CreateMessageContext).ops, localctx.(*CreateMessageContext).s16) + { + p.SetState(165) + + var _m = p.Match(CELParserIDENTIFIER) + + localctx.(*CreateMessageContext)._IDENTIFIER = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*CreateMessageContext).ids = append(localctx.(*CreateMessageContext).ids, localctx.(*CreateMessageContext)._IDENTIFIER) + + + p.SetState(170) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + { + p.SetState(171) + + var _m = p.Match(CELParserLBRACE) + + localctx.(*CreateMessageContext).op = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + p.SetState(173) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK || _la == CELParserIDENTIFIER { + { + p.SetState(172) + + var _x = p.FieldInitializerList() + + + localctx.(*CreateMessageContext).entries = _x + } + + } + p.SetState(176) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserCOMMA { + { + p.SetState(175) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(178) + p.Match(CELParserRBRACE) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 6: + localctx = NewConstantLiteralContext(p, localctx) + p.EnterOuterAlt(localctx, 6) + { + p.SetState(179) + p.Literal() + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IExprListContext is an interface to support dynamic dispatch. 
+type IExprListContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Get_expr returns the _expr rule contexts. + Get_expr() IExprContext + + + // Set_expr sets the _expr rule contexts. + Set_expr(IExprContext) + + + // GetE returns the e rule context list. + GetE() []IExprContext + + + // SetE sets the e rule context list. + SetE([]IExprContext) + + + // Getter signatures + AllExpr() []IExprContext + Expr(i int) IExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsExprListContext differentiates from other interfaces. + IsExprListContext() +} + +type ExprListContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _expr IExprContext + e []IExprContext +} + +func NewEmptyExprListContext() *ExprListContext { + var p = new(ExprListContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_exprList + return p +} + +func InitEmptyExprListContext(p *ExprListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_exprList +} + +func (*ExprListContext) IsExprListContext() {} + +func NewExprListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExprListContext { + var p = new(ExprListContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_exprList + + return p +} + +func (s *ExprListContext) GetParser() antlr.Parser { return s.parser } + +func (s *ExprListContext) Get_expr() IExprContext { return s._expr } + + +func (s *ExprListContext) Set_expr(v IExprContext) { s._expr = v } + + +func (s *ExprListContext) GetE() []IExprContext { return s.e } + + +func (s *ExprListContext) SetE(v []IExprContext) { s.e = v } + + +func (s *ExprListContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *ExprListContext) Expr(i int) IExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *ExprListContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *ExprListContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *ExprListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ExprListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ExprListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterExprList(s) + } +} + +func (s *ExprListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitExprList(s) + } +} + +func (s *ExprListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitExprList(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ExprList() (localctx 
IExprListContext) { + localctx = NewExprListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 18, CELParserRULE_exprList) + var _la int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(182) + + var _x = p.Expr() + + + localctx.(*ExprListContext)._expr = _x + } + localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) + p.SetState(187) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + for _la == CELParserCOMMA { + { + p.SetState(183) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(184) + + var _x = p.Expr() + + + localctx.(*ExprListContext)._expr = _x + } + localctx.(*ExprListContext).e = append(localctx.(*ExprListContext).e, localctx.(*ExprListContext)._expr) + + + p.SetState(189) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IListInitContext is an interface to support dynamic dispatch. +type IListInitContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // Get_optExpr returns the _optExpr rule contexts. + Get_optExpr() IOptExprContext + + + // Set_optExpr sets the _optExpr rule contexts. + Set_optExpr(IOptExprContext) + + + // GetElems returns the elems rule context list. + GetElems() []IOptExprContext + + + // SetElems sets the elems rule context list. + SetElems([]IOptExprContext) + + + // Getter signatures + AllOptExpr() []IOptExprContext + OptExpr(i int) IOptExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsListInitContext differentiates from other interfaces. 
+ IsListInitContext() +} + +type ListInitContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + elems []IOptExprContext +} + +func NewEmptyListInitContext() *ListInitContext { + var p = new(ListInitContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_listInit + return p +} + +func InitEmptyListInitContext(p *ListInitContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_listInit +} + +func (*ListInitContext) IsListInitContext() {} + +func NewListInitContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ListInitContext { + var p = new(ListInitContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_listInit + + return p +} + +func (s *ListInitContext) GetParser() antlr.Parser { return s.parser } + +func (s *ListInitContext) Get_optExpr() IOptExprContext { return s._optExpr } + + +func (s *ListInitContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + + +func (s *ListInitContext) GetElems() []IOptExprContext { return s.elems } + + +func (s *ListInitContext) SetElems(v []IOptExprContext) { s.elems = v } + + +func (s *ListInitContext) AllOptExpr() []IOptExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOptExprContext); ok { + len++ + } + } + + tst := make([]IOptExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOptExprContext); ok { + tst[i] = t.(IOptExprContext) + i++ + } + } + + return tst +} + +func (s *ListInitContext) OptExpr(i int) IOptExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOptExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOptExprContext) +} + +func (s *ListInitContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *ListInitContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *ListInitContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *ListInitContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *ListInitContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterListInit(s) + } +} + +func (s *ListInitContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitListInit(s) + } +} + +func (s *ListInitContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitListInit(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) ListInit() (localctx IListInitContext) { + localctx = NewListInitContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 20, CELParserRULE_listInit) + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(190) + + var _x = p.OptExpr() + + + localctx.(*ListInitContext)._optExpr = _x + } + localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) + p.SetState(195) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = 
p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(191) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(192) + + var _x = p.OptExpr() + + + localctx.(*ListInitContext)._optExpr = _x + } + localctx.(*ListInitContext).elems = append(localctx.(*ListInitContext).elems, localctx.(*ListInitContext)._optExpr) + + + } + p.SetState(197) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 27, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IFieldInitializerListContext is an interface to support dynamic dispatch. +type IFieldInitializerListContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS21 returns the s21 token. + GetS21() antlr.Token + + + // SetS21 sets the s21 token. + SetS21(antlr.Token) + + + // GetCols returns the cols token list. + GetCols() []antlr.Token + + + // SetCols sets the cols token list. + SetCols([]antlr.Token) + + + // Get_optField returns the _optField rule contexts. + Get_optField() IOptFieldContext + + // Get_expr returns the _expr rule contexts. + Get_expr() IExprContext + + + // Set_optField sets the _optField rule contexts. + Set_optField(IOptFieldContext) + + // Set_expr sets the _expr rule contexts. + Set_expr(IExprContext) + + + // GetFields returns the fields rule context list. + GetFields() []IOptFieldContext + + // GetValues returns the values rule context list. + GetValues() []IExprContext + + + // SetFields sets the fields rule context list. + SetFields([]IOptFieldContext) + + // SetValues sets the values rule context list. + SetValues([]IExprContext) + + + // Getter signatures + AllOptField() []IOptFieldContext + OptField(i int) IOptFieldContext + AllCOLON() []antlr.TerminalNode + COLON(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsFieldInitializerListContext differentiates from other interfaces. 
+ IsFieldInitializerListContext() +} + +type FieldInitializerListContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _optField IOptFieldContext + fields []IOptFieldContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext +} + +func NewEmptyFieldInitializerListContext() *FieldInitializerListContext { + var p = new(FieldInitializerListContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_fieldInitializerList + return p +} + +func InitEmptyFieldInitializerListContext(p *FieldInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_fieldInitializerList +} + +func (*FieldInitializerListContext) IsFieldInitializerListContext() {} + +func NewFieldInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *FieldInitializerListContext { + var p = new(FieldInitializerListContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_fieldInitializerList + + return p +} + +func (s *FieldInitializerListContext) GetParser() antlr.Parser { return s.parser } + +func (s *FieldInitializerListContext) GetS21() antlr.Token { return s.s21 } + + +func (s *FieldInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + + +func (s *FieldInitializerListContext) GetCols() []antlr.Token { return s.cols } + + +func (s *FieldInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + + +func (s *FieldInitializerListContext) Get_optField() IOptFieldContext { return s._optField } + +func (s *FieldInitializerListContext) Get_expr() IExprContext { return s._expr } + + +func (s *FieldInitializerListContext) Set_optField(v IOptFieldContext) { s._optField = v } + +func (s *FieldInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + + +func (s *FieldInitializerListContext) GetFields() []IOptFieldContext { return s.fields } + +func (s *FieldInitializerListContext) GetValues() []IExprContext { return s.values } + + +func (s *FieldInitializerListContext) SetFields(v []IOptFieldContext) { s.fields = v } + +func (s *FieldInitializerListContext) SetValues(v []IExprContext) { s.values = v } + + +func (s *FieldInitializerListContext) AllOptField() []IOptFieldContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOptFieldContext); ok { + len++ + } + } + + tst := make([]IOptFieldContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOptFieldContext); ok { + tst[i] = t.(IOptFieldContext) + i++ + } + } + + return tst +} + +func (s *FieldInitializerListContext) OptField(i int) IOptFieldContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOptFieldContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOptFieldContext) +} + +func (s *FieldInitializerListContext) AllCOLON() []antlr.TerminalNode { + return s.GetTokens(CELParserCOLON) +} + +func (s *FieldInitializerListContext) COLON(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOLON, i) +} + +func (s *FieldInitializerListContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok 
:= ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func (s *FieldInitializerListContext) Expr(i int) IExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *FieldInitializerListContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *FieldInitializerListContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *FieldInitializerListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *FieldInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *FieldInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterFieldInitializerList(s) + } +} + +func (s *FieldInitializerListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitFieldInitializerList(s) + } +} + +func (s *FieldInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitFieldInitializerList(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) FieldInitializerList() (localctx IFieldInitializerListContext) { + localctx = NewFieldInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 22, CELParserRULE_fieldInitializerList) + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(198) + + var _x = p.OptField() + + + localctx.(*FieldInitializerListContext)._optField = _x + } + localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) + { + p.SetState(199) + + var _m = p.Match(CELParserCOLON) + + localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) + { + p.SetState(200) + + var _x = p.Expr() + + + localctx.(*FieldInitializerListContext)._expr = _x + } + localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) + p.SetState(208) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(201) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(202) + + var _x = p.OptField() + + + localctx.(*FieldInitializerListContext)._optField = _x + } + localctx.(*FieldInitializerListContext).fields = append(localctx.(*FieldInitializerListContext).fields, localctx.(*FieldInitializerListContext)._optField) + { + p.SetState(203) + + var _m = p.Match(CELParserCOLON) + + localctx.(*FieldInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + 
} + } + localctx.(*FieldInitializerListContext).cols = append(localctx.(*FieldInitializerListContext).cols, localctx.(*FieldInitializerListContext).s21) + { + p.SetState(204) + + var _x = p.Expr() + + + localctx.(*FieldInitializerListContext)._expr = _x + } + localctx.(*FieldInitializerListContext).values = append(localctx.(*FieldInitializerListContext).values, localctx.(*FieldInitializerListContext)._expr) + + + } + p.SetState(210) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 28, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IOptFieldContext is an interface to support dynamic dispatch. +type IOptFieldContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOpt returns the opt token. + GetOpt() antlr.Token + + + // SetOpt sets the opt token. + SetOpt(antlr.Token) + + + // Getter signatures + IDENTIFIER() antlr.TerminalNode + QUESTIONMARK() antlr.TerminalNode + + // IsOptFieldContext differentiates from other interfaces. + IsOptFieldContext() +} + +type OptFieldContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + opt antlr.Token +} + +func NewEmptyOptFieldContext() *OptFieldContext { + var p = new(OptFieldContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optField + return p +} + +func InitEmptyOptFieldContext(p *OptFieldContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optField +} + +func (*OptFieldContext) IsOptFieldContext() {} + +func NewOptFieldContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptFieldContext { + var p = new(OptFieldContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_optField + + return p +} + +func (s *OptFieldContext) GetParser() antlr.Parser { return s.parser } + +func (s *OptFieldContext) GetOpt() antlr.Token { return s.opt } + + +func (s *OptFieldContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *OptFieldContext) IDENTIFIER() antlr.TerminalNode { + return s.GetToken(CELParserIDENTIFIER, 0) +} + +func (s *OptFieldContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + +func (s *OptFieldContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *OptFieldContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *OptFieldContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterOptField(s) + } +} + +func (s *OptFieldContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitOptField(s) + } +} + +func (s *OptFieldContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitOptField(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) OptField() (localctx 
IOptFieldContext) { + localctx = NewOptFieldContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 24, CELParserRULE_optField) + var _la int + + p.EnterOuterAlt(localctx, 1) + p.SetState(212) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(211) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*OptFieldContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(214) + p.Match(CELParserIDENTIFIER) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IMapInitializerListContext is an interface to support dynamic dispatch. +type IMapInitializerListContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetS21 returns the s21 token. + GetS21() antlr.Token + + + // SetS21 sets the s21 token. + SetS21(antlr.Token) + + + // GetCols returns the cols token list. + GetCols() []antlr.Token + + + // SetCols sets the cols token list. + SetCols([]antlr.Token) + + + // Get_optExpr returns the _optExpr rule contexts. + Get_optExpr() IOptExprContext + + // Get_expr returns the _expr rule contexts. + Get_expr() IExprContext + + + // Set_optExpr sets the _optExpr rule contexts. + Set_optExpr(IOptExprContext) + + // Set_expr sets the _expr rule contexts. + Set_expr(IExprContext) + + + // GetKeys returns the keys rule context list. + GetKeys() []IOptExprContext + + // GetValues returns the values rule context list. + GetValues() []IExprContext + + + // SetKeys sets the keys rule context list. + SetKeys([]IOptExprContext) + + // SetValues sets the values rule context list. + SetValues([]IExprContext) + + + // Getter signatures + AllOptExpr() []IOptExprContext + OptExpr(i int) IOptExprContext + AllCOLON() []antlr.TerminalNode + COLON(i int) antlr.TerminalNode + AllExpr() []IExprContext + Expr(i int) IExprContext + AllCOMMA() []antlr.TerminalNode + COMMA(i int) antlr.TerminalNode + + // IsMapInitializerListContext differentiates from other interfaces. 
+ IsMapInitializerListContext() +} + +type MapInitializerListContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + _optExpr IOptExprContext + keys []IOptExprContext + s21 antlr.Token + cols []antlr.Token + _expr IExprContext + values []IExprContext +} + +func NewEmptyMapInitializerListContext() *MapInitializerListContext { + var p = new(MapInitializerListContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_mapInitializerList + return p +} + +func InitEmptyMapInitializerListContext(p *MapInitializerListContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_mapInitializerList +} + +func (*MapInitializerListContext) IsMapInitializerListContext() {} + +func NewMapInitializerListContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *MapInitializerListContext { + var p = new(MapInitializerListContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_mapInitializerList + + return p +} + +func (s *MapInitializerListContext) GetParser() antlr.Parser { return s.parser } + +func (s *MapInitializerListContext) GetS21() antlr.Token { return s.s21 } + + +func (s *MapInitializerListContext) SetS21(v antlr.Token) { s.s21 = v } + + +func (s *MapInitializerListContext) GetCols() []antlr.Token { return s.cols } + + +func (s *MapInitializerListContext) SetCols(v []antlr.Token) { s.cols = v } + + +func (s *MapInitializerListContext) Get_optExpr() IOptExprContext { return s._optExpr } + +func (s *MapInitializerListContext) Get_expr() IExprContext { return s._expr } + + +func (s *MapInitializerListContext) Set_optExpr(v IOptExprContext) { s._optExpr = v } + +func (s *MapInitializerListContext) Set_expr(v IExprContext) { s._expr = v } + + +func (s *MapInitializerListContext) GetKeys() []IOptExprContext { return s.keys } + +func (s *MapInitializerListContext) GetValues() []IExprContext { return s.values } + + +func (s *MapInitializerListContext) SetKeys(v []IOptExprContext) { s.keys = v } + +func (s *MapInitializerListContext) SetValues(v []IExprContext) { s.values = v } + + +func (s *MapInitializerListContext) AllOptExpr() []IOptExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IOptExprContext); ok { + len++ + } + } + + tst := make([]IOptExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IOptExprContext); ok { + tst[i] = t.(IOptExprContext) + i++ + } + } + + return tst +} + +func (s *MapInitializerListContext) OptExpr(i int) IOptExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IOptExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IOptExprContext) +} + +func (s *MapInitializerListContext) AllCOLON() []antlr.TerminalNode { + return s.GetTokens(CELParserCOLON) +} + +func (s *MapInitializerListContext) COLON(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOLON, i) +} + +func (s *MapInitializerListContext) AllExpr() []IExprContext { + children := s.GetChildren() + len := 0 + for _, ctx := range children { + if _, ok := ctx.(IExprContext); ok { + len++ + } + } + + tst := make([]IExprContext, len) + i := 0 + for _, ctx := range children { + if t, ok := ctx.(IExprContext); ok { + tst[i] = t.(IExprContext) + i++ + } + } + + return tst +} + +func 
(s *MapInitializerListContext) Expr(i int) IExprContext { + var t antlr.RuleContext; + j := 0 + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + if j == i { + t = ctx.(antlr.RuleContext); + break + } + j++ + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *MapInitializerListContext) AllCOMMA() []antlr.TerminalNode { + return s.GetTokens(CELParserCOMMA) +} + +func (s *MapInitializerListContext) COMMA(i int) antlr.TerminalNode { + return s.GetToken(CELParserCOMMA, i) +} + +func (s *MapInitializerListContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *MapInitializerListContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *MapInitializerListContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterMapInitializerList(s) + } +} + +func (s *MapInitializerListContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitMapInitializerList(s) + } +} + +func (s *MapInitializerListContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitMapInitializerList(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) MapInitializerList() (localctx IMapInitializerListContext) { + localctx = NewMapInitializerListContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 26, CELParserRULE_mapInitializerList) + var _alt int + + p.EnterOuterAlt(localctx, 1) + { + p.SetState(216) + + var _x = p.OptExpr() + + + localctx.(*MapInitializerListContext)._optExpr = _x + } + localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) + { + p.SetState(217) + + var _m = p.Match(CELParserCOLON) + + localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) + { + p.SetState(218) + + var _x = p.Expr() + + + localctx.(*MapInitializerListContext)._expr = _x + } + localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) + p.SetState(226) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + for _alt != 2 && _alt != antlr.ATNInvalidAltNumber { + if _alt == 1 { + { + p.SetState(219) + p.Match(CELParserCOMMA) + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + { + p.SetState(220) + + var _x = p.OptExpr() + + + localctx.(*MapInitializerListContext)._optExpr = _x + } + localctx.(*MapInitializerListContext).keys = append(localctx.(*MapInitializerListContext).keys, localctx.(*MapInitializerListContext)._optExpr) + { + p.SetState(221) + + var _m = p.Match(CELParserCOLON) + + localctx.(*MapInitializerListContext).s21 = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + localctx.(*MapInitializerListContext).cols = append(localctx.(*MapInitializerListContext).cols, localctx.(*MapInitializerListContext).s21) + { + p.SetState(222) + + 
var _x = p.Expr() + + + localctx.(*MapInitializerListContext)._expr = _x + } + localctx.(*MapInitializerListContext).values = append(localctx.(*MapInitializerListContext).values, localctx.(*MapInitializerListContext)._expr) + + + } + p.SetState(228) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _alt = p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 30, p.GetParserRuleContext()) + if p.HasError() { + goto errorExit + } + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// IOptExprContext is an interface to support dynamic dispatch. +type IOptExprContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + + // GetOpt returns the opt token. + GetOpt() antlr.Token + + + // SetOpt sets the opt token. + SetOpt(antlr.Token) + + + // GetE returns the e rule contexts. + GetE() IExprContext + + + // SetE sets the e rule contexts. + SetE(IExprContext) + + + // Getter signatures + Expr() IExprContext + QUESTIONMARK() antlr.TerminalNode + + // IsOptExprContext differentiates from other interfaces. + IsOptExprContext() +} + +type OptExprContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser + opt antlr.Token + e IExprContext +} + +func NewEmptyOptExprContext() *OptExprContext { + var p = new(OptExprContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optExpr + return p +} + +func InitEmptyOptExprContext(p *OptExprContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_optExpr +} + +func (*OptExprContext) IsOptExprContext() {} + +func NewOptExprContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *OptExprContext { + var p = new(OptExprContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_optExpr + + return p +} + +func (s *OptExprContext) GetParser() antlr.Parser { return s.parser } + +func (s *OptExprContext) GetOpt() antlr.Token { return s.opt } + + +func (s *OptExprContext) SetOpt(v antlr.Token) { s.opt = v } + + +func (s *OptExprContext) GetE() IExprContext { return s.e } + + +func (s *OptExprContext) SetE(v IExprContext) { s.e = v } + + +func (s *OptExprContext) Expr() IExprContext { + var t antlr.RuleContext; + for _, ctx := range s.GetChildren() { + if _, ok := ctx.(IExprContext); ok { + t = ctx.(antlr.RuleContext); + break + } + } + + if t == nil { + return nil + } + + return t.(IExprContext) +} + +func (s *OptExprContext) QUESTIONMARK() antlr.TerminalNode { + return s.GetToken(CELParserQUESTIONMARK, 0) +} + +func (s *OptExprContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *OptExprContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + +func (s *OptExprContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterOptExpr(s) + } +} + +func (s *OptExprContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitOptExpr(s) + } +} + +func (s *OptExprContext) Accept(visitor antlr.ParseTreeVisitor) 
interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitOptExpr(s) + + default: + return t.VisitChildren(s) + } +} + + + + +func (p *CELParser) OptExpr() (localctx IOptExprContext) { + localctx = NewOptExprContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 28, CELParserRULE_optExpr) + var _la int + + p.EnterOuterAlt(localctx, 1) + p.SetState(230) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserQUESTIONMARK { + { + p.SetState(229) + + var _m = p.Match(CELParserQUESTIONMARK) + + localctx.(*OptExprContext).opt = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(232) + + var _x = p.Expr() + + + localctx.(*OptExprContext).e = _x + } + + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +// ILiteralContext is an interface to support dynamic dispatch. +type ILiteralContext interface { + antlr.ParserRuleContext + + // GetParser returns the parser. + GetParser() antlr.Parser + // IsLiteralContext differentiates from other interfaces. + IsLiteralContext() +} + +type LiteralContext struct { + antlr.BaseParserRuleContext + parser antlr.Parser +} + +func NewEmptyLiteralContext() *LiteralContext { + var p = new(LiteralContext) + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_literal + return p +} + +func InitEmptyLiteralContext(p *LiteralContext) { + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, nil, -1) + p.RuleIndex = CELParserRULE_literal +} + +func (*LiteralContext) IsLiteralContext() {} + +func NewLiteralContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *LiteralContext { + var p = new(LiteralContext) + + antlr.InitBaseParserRuleContext(&p.BaseParserRuleContext, parent, invokingState) + + p.parser = parser + p.RuleIndex = CELParserRULE_literal + + return p +} + +func (s *LiteralContext) GetParser() antlr.Parser { return s.parser } + +func (s *LiteralContext) CopyAll(ctx *LiteralContext) { + s.CopyFrom(&ctx.BaseParserRuleContext) +} + +func (s *LiteralContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *LiteralContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string { + return antlr.TreesStringTree(s, ruleNames, recog) +} + + + + +type BytesContext struct { + LiteralContext + tok antlr.Token +} + +func NewBytesContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BytesContext { + var p = new(BytesContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *BytesContext) GetTok() antlr.Token { return s.tok } + + +func (s *BytesContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *BytesContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *BytesContext) BYTES() antlr.TerminalNode { + return s.GetToken(CELParserBYTES, 0) +} + + +func (s *BytesContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterBytes(s) + } +} + +func (s *BytesContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitBytes(s) + } +} + +func (s 
*BytesContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitBytes(s) + + default: + return t.VisitChildren(s) + } +} + + +type UintContext struct { + LiteralContext + tok antlr.Token +} + +func NewUintContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UintContext { + var p = new(UintContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *UintContext) GetTok() antlr.Token { return s.tok } + + +func (s *UintContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *UintContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *UintContext) NUM_UINT() antlr.TerminalNode { + return s.GetToken(CELParserNUM_UINT, 0) +} + + +func (s *UintContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterUint(s) + } +} + +func (s *UintContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitUint(s) + } +} + +func (s *UintContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitUint(s) + + default: + return t.VisitChildren(s) + } +} + + +type NullContext struct { + LiteralContext + tok antlr.Token +} + +func NewNullContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *NullContext { + var p = new(NullContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *NullContext) GetTok() antlr.Token { return s.tok } + + +func (s *NullContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *NullContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *NullContext) NUL() antlr.TerminalNode { + return s.GetToken(CELParserNUL, 0) +} + + +func (s *NullContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterNull(s) + } +} + +func (s *NullContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitNull(s) + } +} + +func (s *NullContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitNull(s) + + default: + return t.VisitChildren(s) + } +} + + +type BoolFalseContext struct { + LiteralContext + tok antlr.Token +} + +func NewBoolFalseContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolFalseContext { + var p = new(BoolFalseContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *BoolFalseContext) GetTok() antlr.Token { return s.tok } + + +func (s *BoolFalseContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *BoolFalseContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *BoolFalseContext) CEL_FALSE() antlr.TerminalNode { + return s.GetToken(CELParserCEL_FALSE, 0) +} + + +func (s *BoolFalseContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterBoolFalse(s) + } +} + +func (s *BoolFalseContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitBoolFalse(s) + } +} + +func (s *BoolFalseContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitBoolFalse(s) + 
+ default: + return t.VisitChildren(s) + } +} + + +type StringContext struct { + LiteralContext + tok antlr.Token +} + +func NewStringContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *StringContext { + var p = new(StringContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *StringContext) GetTok() antlr.Token { return s.tok } + + +func (s *StringContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *StringContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *StringContext) STRING() antlr.TerminalNode { + return s.GetToken(CELParserSTRING, 0) +} + + +func (s *StringContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterString(s) + } +} + +func (s *StringContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitString(s) + } +} + +func (s *StringContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitString(s) + + default: + return t.VisitChildren(s) + } +} + + +type DoubleContext struct { + LiteralContext + sign antlr.Token + tok antlr.Token +} + +func NewDoubleContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *DoubleContext { + var p = new(DoubleContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *DoubleContext) GetSign() antlr.Token { return s.sign } + +func (s *DoubleContext) GetTok() antlr.Token { return s.tok } + + +func (s *DoubleContext) SetSign(v antlr.Token) { s.sign = v } + +func (s *DoubleContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *DoubleContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *DoubleContext) NUM_FLOAT() antlr.TerminalNode { + return s.GetToken(CELParserNUM_FLOAT, 0) +} + +func (s *DoubleContext) MINUS() antlr.TerminalNode { + return s.GetToken(CELParserMINUS, 0) +} + + +func (s *DoubleContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterDouble(s) + } +} + +func (s *DoubleContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitDouble(s) + } +} + +func (s *DoubleContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitDouble(s) + + default: + return t.VisitChildren(s) + } +} + + +type BoolTrueContext struct { + LiteralContext + tok antlr.Token +} + +func NewBoolTrueContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BoolTrueContext { + var p = new(BoolTrueContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *BoolTrueContext) GetTok() antlr.Token { return s.tok } + + +func (s *BoolTrueContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *BoolTrueContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *BoolTrueContext) CEL_TRUE() antlr.TerminalNode { + return s.GetToken(CELParserCEL_TRUE, 0) +} + + +func (s *BoolTrueContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterBoolTrue(s) + } +} + +func (s *BoolTrueContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitBoolTrue(s) + } +} + +func 
(s *BoolTrueContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitBoolTrue(s) + + default: + return t.VisitChildren(s) + } +} + + +type IntContext struct { + LiteralContext + sign antlr.Token + tok antlr.Token +} + +func NewIntContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *IntContext { + var p = new(IntContext) + + InitEmptyLiteralContext(&p.LiteralContext) + p.parser = parser + p.CopyAll(ctx.(*LiteralContext)) + + return p +} + + +func (s *IntContext) GetSign() antlr.Token { return s.sign } + +func (s *IntContext) GetTok() antlr.Token { return s.tok } + + +func (s *IntContext) SetSign(v antlr.Token) { s.sign = v } + +func (s *IntContext) SetTok(v antlr.Token) { s.tok = v } + +func (s *IntContext) GetRuleContext() antlr.RuleContext { + return s +} + +func (s *IntContext) NUM_INT() antlr.TerminalNode { + return s.GetToken(CELParserNUM_INT, 0) +} + +func (s *IntContext) MINUS() antlr.TerminalNode { + return s.GetToken(CELParserMINUS, 0) +} + + +func (s *IntContext) EnterRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.EnterInt(s) + } +} + +func (s *IntContext) ExitRule(listener antlr.ParseTreeListener) { + if listenerT, ok := listener.(CELListener); ok { + listenerT.ExitInt(s) + } +} + +func (s *IntContext) Accept(visitor antlr.ParseTreeVisitor) interface{} { + switch t := visitor.(type) { + case CELVisitor: + return t.VisitInt(s) + + default: + return t.VisitChildren(s) + } +} + + + +func (p *CELParser) Literal() (localctx ILiteralContext) { + localctx = NewLiteralContext(p, p.GetParserRuleContext(), p.GetState()) + p.EnterRule(localctx, 30, CELParserRULE_literal) + var _la int + + p.SetState(248) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + + switch p.GetInterpreter().AdaptivePredict(p.BaseParser, p.GetTokenStream(), 34, p.GetParserRuleContext()) { + case 1: + localctx = NewIntContext(p, localctx) + p.EnterOuterAlt(localctx, 1) + p.SetState(235) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserMINUS { + { + p.SetState(234) + + var _m = p.Match(CELParserMINUS) + + localctx.(*IntContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(237) + + var _m = p.Match(CELParserNUM_INT) + + localctx.(*IntContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 2: + localctx = NewUintContext(p, localctx) + p.EnterOuterAlt(localctx, 2) + { + p.SetState(238) + + var _m = p.Match(CELParserNUM_UINT) + + localctx.(*UintContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 3: + localctx = NewDoubleContext(p, localctx) + p.EnterOuterAlt(localctx, 3) + p.SetState(240) + p.GetErrorHandler().Sync(p) + if p.HasError() { + goto errorExit + } + _la = p.GetTokenStream().LA(1) + + + if _la == CELParserMINUS { + { + p.SetState(239) + + var _m = p.Match(CELParserMINUS) + + localctx.(*DoubleContext).sign = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + } + { + p.SetState(242) + + var _m = p.Match(CELParserNUM_FLOAT) + + localctx.(*DoubleContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 4: + localctx = NewStringContext(p, localctx) + p.EnterOuterAlt(localctx, 4) + { + p.SetState(243) + + var _m = 
p.Match(CELParserSTRING) + + localctx.(*StringContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 5: + localctx = NewBytesContext(p, localctx) + p.EnterOuterAlt(localctx, 5) + { + p.SetState(244) + + var _m = p.Match(CELParserBYTES) + + localctx.(*BytesContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 6: + localctx = NewBoolTrueContext(p, localctx) + p.EnterOuterAlt(localctx, 6) + { + p.SetState(245) + + var _m = p.Match(CELParserCEL_TRUE) + + localctx.(*BoolTrueContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 7: + localctx = NewBoolFalseContext(p, localctx) + p.EnterOuterAlt(localctx, 7) + { + p.SetState(246) + + var _m = p.Match(CELParserCEL_FALSE) + + localctx.(*BoolFalseContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + + case 8: + localctx = NewNullContext(p, localctx) + p.EnterOuterAlt(localctx, 8) + { + p.SetState(247) + + var _m = p.Match(CELParserNUL) + + localctx.(*NullContext).tok = _m + if p.HasError() { + // Recognition error - abort rule + goto errorExit + } + } + + case antlr.ATNInvalidAltNumber: + goto errorExit + } + + +errorExit: + if p.HasError() { + v := p.GetError() + localctx.SetException(v) + p.GetErrorHandler().ReportError(p, v) + p.GetErrorHandler().Recover(p, v) + p.SetError(nil) + } + p.ExitRule() + return localctx + goto errorExit // Trick to prevent compiler error if the label is not used +} + + +func (p *CELParser) Sempred(localctx antlr.RuleContext, ruleIndex, predIndex int) bool { + switch ruleIndex { + case 4: + var t *RelationContext = nil + if localctx != nil { t = localctx.(*RelationContext) } + return p.Relation_Sempred(t, predIndex) + + case 5: + var t *CalcContext = nil + if localctx != nil { t = localctx.(*CalcContext) } + return p.Calc_Sempred(t, predIndex) + + case 7: + var t *MemberContext = nil + if localctx != nil { t = localctx.(*MemberContext) } + return p.Member_Sempred(t, predIndex) + + + default: + panic("No predicate with index: " + fmt.Sprint(ruleIndex)) + } +} + +func (p *CELParser) Relation_Sempred(localctx antlr.RuleContext, predIndex int) bool { + switch predIndex { + case 0: + return p.Precpred(p.GetParserRuleContext(), 1) + + default: + panic("No predicate with index: " + fmt.Sprint(predIndex)) + } +} + +func (p *CELParser) Calc_Sempred(localctx antlr.RuleContext, predIndex int) bool { + switch predIndex { + case 1: + return p.Precpred(p.GetParserRuleContext(), 2) + + case 2: + return p.Precpred(p.GetParserRuleContext(), 1) + + default: + panic("No predicate with index: " + fmt.Sprint(predIndex)) + } +} + +func (p *CELParser) Member_Sempred(localctx antlr.RuleContext, predIndex int) bool { + switch predIndex { + case 3: + return p.Precpred(p.GetParserRuleContext(), 3) + + case 4: + return p.Precpred(p.GetParserRuleContext(), 2) + + case 5: + return p.Precpred(p.GetParserRuleContext(), 1) + + default: + panic("No predicate with index: " + fmt.Sprint(predIndex)) + } +} + diff --git a/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go new file mode 100644 index 00000000..d2fbd563 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/gen/cel_visitor.go @@ -0,0 +1,110 @@ +// Code generated from /usr/local/google/home/tswadell/go/src/github.com/google/cel-go/parser/gen/CEL.g4 by ANTLR 4.13.1. DO NOT EDIT. 
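
The EnterRule/ExitRule and Accept methods generated in cel_parser.go above dispatch through type assertions to CELListener and CELVisitor, so a consumer only has to satisfy one of those interfaces to observe the parse tree; the CELVisitor interface itself follows in the new cel_visitor.go file that begins here. A minimal sketch of driving the listener side — not part of this patch, and assuming the CELLexer, BaseCELListener, and Start_ entry rule that ship alongside the parser elsewhere in this same generated gen package:

    package main

    import (
    	"fmt"

    	antlr "github.com/antlr4-go/antlr/v4"
    	gen "github.com/google/cel-go/parser/gen"
    )

    // intCounter embeds the generated base listener (assumed to be part of
    // this vendor drop) so only the callbacks of interest need overriding.
    type intCounter struct {
    	gen.BaseCELListener
    	ints int
    }

    // EnterInt fires once per Int literal alternative in the parse tree.
    func (c *intCounter) EnterInt(ctx *gen.IntContext) { c.ints++ }

    func main() {
    	lexer := gen.NewCELLexer(antlr.NewInputStream("1 + 2 * 3"))
    	parser := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel))
    	tree := parser.Start_() // entry rule; generated as Start_ in this ANTLR 4.13 output
    	c := &intCounter{}
    	antlr.ParseTreeWalkerDefault.Walk(c, tree)
    	fmt.Println(c.ints) // 3
    }

The visitor side works the same way, except that each generated Accept forwards to the matching VisitX method on anything implementing CELVisitor, as the interface below declares.
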
+ +package gen // CEL +import "github.com/antlr4-go/antlr/v4" + + +// A complete Visitor for a parse tree produced by CELParser. +type CELVisitor interface { + antlr.ParseTreeVisitor + + // Visit a parse tree produced by CELParser#start. + VisitStart(ctx *StartContext) interface{} + + // Visit a parse tree produced by CELParser#expr. + VisitExpr(ctx *ExprContext) interface{} + + // Visit a parse tree produced by CELParser#conditionalOr. + VisitConditionalOr(ctx *ConditionalOrContext) interface{} + + // Visit a parse tree produced by CELParser#conditionalAnd. + VisitConditionalAnd(ctx *ConditionalAndContext) interface{} + + // Visit a parse tree produced by CELParser#relation. + VisitRelation(ctx *RelationContext) interface{} + + // Visit a parse tree produced by CELParser#calc. + VisitCalc(ctx *CalcContext) interface{} + + // Visit a parse tree produced by CELParser#MemberExpr. + VisitMemberExpr(ctx *MemberExprContext) interface{} + + // Visit a parse tree produced by CELParser#LogicalNot. + VisitLogicalNot(ctx *LogicalNotContext) interface{} + + // Visit a parse tree produced by CELParser#Negate. + VisitNegate(ctx *NegateContext) interface{} + + // Visit a parse tree produced by CELParser#MemberCall. + VisitMemberCall(ctx *MemberCallContext) interface{} + + // Visit a parse tree produced by CELParser#Select. + VisitSelect(ctx *SelectContext) interface{} + + // Visit a parse tree produced by CELParser#PrimaryExpr. + VisitPrimaryExpr(ctx *PrimaryExprContext) interface{} + + // Visit a parse tree produced by CELParser#Index. + VisitIndex(ctx *IndexContext) interface{} + + // Visit a parse tree produced by CELParser#IdentOrGlobalCall. + VisitIdentOrGlobalCall(ctx *IdentOrGlobalCallContext) interface{} + + // Visit a parse tree produced by CELParser#Nested. + VisitNested(ctx *NestedContext) interface{} + + // Visit a parse tree produced by CELParser#CreateList. + VisitCreateList(ctx *CreateListContext) interface{} + + // Visit a parse tree produced by CELParser#CreateStruct. + VisitCreateStruct(ctx *CreateStructContext) interface{} + + // Visit a parse tree produced by CELParser#CreateMessage. + VisitCreateMessage(ctx *CreateMessageContext) interface{} + + // Visit a parse tree produced by CELParser#ConstantLiteral. + VisitConstantLiteral(ctx *ConstantLiteralContext) interface{} + + // Visit a parse tree produced by CELParser#exprList. + VisitExprList(ctx *ExprListContext) interface{} + + // Visit a parse tree produced by CELParser#listInit. + VisitListInit(ctx *ListInitContext) interface{} + + // Visit a parse tree produced by CELParser#fieldInitializerList. + VisitFieldInitializerList(ctx *FieldInitializerListContext) interface{} + + // Visit a parse tree produced by CELParser#optField. + VisitOptField(ctx *OptFieldContext) interface{} + + // Visit a parse tree produced by CELParser#mapInitializerList. + VisitMapInitializerList(ctx *MapInitializerListContext) interface{} + + // Visit a parse tree produced by CELParser#optExpr. + VisitOptExpr(ctx *OptExprContext) interface{} + + // Visit a parse tree produced by CELParser#Int. + VisitInt(ctx *IntContext) interface{} + + // Visit a parse tree produced by CELParser#Uint. + VisitUint(ctx *UintContext) interface{} + + // Visit a parse tree produced by CELParser#Double. + VisitDouble(ctx *DoubleContext) interface{} + + // Visit a parse tree produced by CELParser#String. + VisitString(ctx *StringContext) interface{} + + // Visit a parse tree produced by CELParser#Bytes. 
+	VisitBytes(ctx *BytesContext) interface{}
+
+	// Visit a parse tree produced by CELParser#BoolTrue.
+	VisitBoolTrue(ctx *BoolTrueContext) interface{}
+
+	// Visit a parse tree produced by CELParser#BoolFalse.
+	VisitBoolFalse(ctx *BoolFalseContext) interface{}
+
+	// Visit a parse tree produced by CELParser#Null.
+	VisitNull(ctx *NullContext) interface{}
+
+}
\ No newline at end of file
diff --git a/vendor/github.com/google/cel-go/parser/gen/doc.go b/vendor/github.com/google/cel-go/parser/gen/doc.go
new file mode 100644
index 00000000..57edd443
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gen contains all of the ANTLR-generated sources used by the cel-go parser.
+package gen
diff --git a/vendor/github.com/google/cel-go/parser/gen/generate.sh b/vendor/github.com/google/cel-go/parser/gen/generate.sh
new file mode 100644
index 00000000..27a9559f
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/gen/generate.sh
@@ -0,0 +1,35 @@
+#!/bin/bash -eu
+#
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# To regenerate the CEL lexer/parser statically, do the following:
+# 1. Download the latest antlr tool from https://www.antlr.org/download.html
+# 2. Copy the downloaded jar to the gen directory. It will have a name
+#    like antlr-<version>-complete.jar.
+# 3. Modify the script below to refer to the current ANTLR version.
+# 4. Execute the generation script from the gen directory.
+# 5. Delete the jar and commit the regenerated sources.
+
+#!/bin/sh
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+# Generate ANTLR artifacts.
+java -Xmx500M -cp ${DIR}/antlr-4.13.1-complete.jar org.antlr.v4.Tool \
+    -Dlanguage=Go \
+    -package gen \
+    -o ${DIR} \
+    -visitor ${DIR}/CEL.g4
+
diff --git a/vendor/github.com/google/cel-go/parser/helper.go b/vendor/github.com/google/cel-go/parser/helper.go
new file mode 100644
index 00000000..96748358
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/helper.go
@@ -0,0 +1,482 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "sync" + + antlr "github.com/antlr4-go/antlr/v4" + + "github.com/google/cel-go/common" + "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +type parserHelper struct { + exprFactory ast.ExprFactory + source common.Source + sourceInfo *ast.SourceInfo + nextID int64 +} + +func newParserHelper(source common.Source, fac ast.ExprFactory) *parserHelper { + return &parserHelper{ + exprFactory: fac, + source: source, + sourceInfo: ast.NewSourceInfo(source), + nextID: 1, + } +} + +func (p *parserHelper) getSourceInfo() *ast.SourceInfo { + return p.sourceInfo +} + +func (p *parserHelper) newLiteral(ctx any, value ref.Val) ast.Expr { + return p.exprFactory.NewLiteral(p.newID(ctx), value) +} + +func (p *parserHelper) newLiteralBool(ctx any, value bool) ast.Expr { + return p.newLiteral(ctx, types.Bool(value)) +} + +func (p *parserHelper) newLiteralString(ctx any, value string) ast.Expr { + return p.newLiteral(ctx, types.String(value)) +} + +func (p *parserHelper) newLiteralBytes(ctx any, value []byte) ast.Expr { + return p.newLiteral(ctx, types.Bytes(value)) +} + +func (p *parserHelper) newLiteralInt(ctx any, value int64) ast.Expr { + return p.newLiteral(ctx, types.Int(value)) +} + +func (p *parserHelper) newLiteralUint(ctx any, value uint64) ast.Expr { + return p.newLiteral(ctx, types.Uint(value)) +} + +func (p *parserHelper) newLiteralDouble(ctx any, value float64) ast.Expr { + return p.newLiteral(ctx, types.Double(value)) +} + +func (p *parserHelper) newIdent(ctx any, name string) ast.Expr { + return p.exprFactory.NewIdent(p.newID(ctx), name) +} + +func (p *parserHelper) newSelect(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewSelect(p.newID(ctx), operand, field) +} + +func (p *parserHelper) newPresenceTest(ctx any, operand ast.Expr, field string) ast.Expr { + return p.exprFactory.NewPresenceTest(p.newID(ctx), operand, field) +} + +func (p *parserHelper) newGlobalCall(ctx any, function string, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewCall(p.newID(ctx), function, args...) +} + +func (p *parserHelper) newReceiverCall(ctx any, function string, target ast.Expr, args ...ast.Expr) ast.Expr { + return p.exprFactory.NewMemberCall(p.newID(ctx), function, target, args...) 
+} + +func (p *parserHelper) newList(ctx any, elements []ast.Expr, optionals ...int32) ast.Expr { + return p.exprFactory.NewList(p.newID(ctx), elements, optionals) +} + +func (p *parserHelper) newMap(ctx any, entries ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewMap(p.newID(ctx), entries) +} + +func (p *parserHelper) newMapEntry(entryID int64, key ast.Expr, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewMapEntry(entryID, key, value, optional) +} + +func (p *parserHelper) newObject(ctx any, typeName string, fields ...ast.EntryExpr) ast.Expr { + return p.exprFactory.NewStruct(p.newID(ctx), typeName, fields) +} + +func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Expr, optional bool) ast.EntryExpr { + return p.exprFactory.NewStructField(fieldID, field, value, optional) +} + +func (p *parserHelper) newComprehension(ctx any, + iterRange ast.Expr, + iterVar string, + accuVar string, + accuInit ast.Expr, + condition ast.Expr, + step ast.Expr, + result ast.Expr) ast.Expr { + return p.exprFactory.NewComprehension( + p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result) +} + +func (p *parserHelper) newID(ctx any) int64 { + if id, isID := ctx.(int64); isID { + return id + } + return p.id(ctx) +} + +func (p *parserHelper) newExpr(ctx any) ast.Expr { + return p.exprFactory.NewUnspecifiedExpr(p.newID(ctx)) +} + +func (p *parserHelper) id(ctx any) int64 { + var offset ast.OffsetRange + switch c := ctx.(type) { + case antlr.ParserRuleContext: + start := c.GetStart() + offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) + case antlr.Token: + offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn())) + offset.Stop = offset.Start + int32(len(c.GetText())) + case common.Location: + offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column())) + offset.Stop = offset.Start + case ast.OffsetRange: + offset = c + default: + // This should only happen if the ctx is nil + return -1 + } + id := p.nextID + p.sourceInfo.SetOffsetRange(id, offset) + p.nextID++ + return id +} + +func (p *parserHelper) deleteID(id int64) { + p.sourceInfo.ClearOffsetRange(id) + if id == p.nextID-1 { + p.nextID-- + } +} + +func (p *parserHelper) getLocation(id int64) common.Location { + return p.sourceInfo.GetStartLocation(id) +} + +func (p *parserHelper) getLocationByOffset(offset int32) common.Location { + return p.getSourceInfo().GetLocationByOffset(offset) +} + +// buildMacroCallArg iterates the expression and returns a new expression +// where all macros have been replaced by their IDs in MacroCalls +func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr { + if _, found := p.sourceInfo.GetMacroCall(expr.ID()); found { + return p.exprFactory.NewUnspecifiedExpr(expr.ID()) + } + + switch expr.Kind() { + case ast.CallKind: + // Iterate the AST from `expr` recursively looking for macros. Because we are at most + // starting from the top level macro, this recursion is bounded by the size of the AST. This + // means that the depth check on the AST during parsing will catch recursion overflows + // before we get to here. + call := expr.AsCall() + macroArgs := make([]ast.Expr, len(call.Args())) + for index, arg := range call.Args() { + macroArgs[index] = p.buildMacroCallArg(arg) + } + if !call.IsMemberFunction() { + return p.exprFactory.NewCall(expr.ID(), call.FunctionName(), macroArgs...) 
+		}
+		macroTarget := p.buildMacroCallArg(call.Target())
+		return p.exprFactory.NewMemberCall(expr.ID(), call.FunctionName(), macroTarget, macroArgs...)
+	case ast.ListKind:
+		list := expr.AsList()
+		macroListArgs := make([]ast.Expr, list.Size())
+		for i, elem := range list.Elements() {
+			macroListArgs[i] = p.buildMacroCallArg(elem)
+		}
+		return p.exprFactory.NewList(expr.ID(), macroListArgs, list.OptionalIndices())
+	}
+	return expr
+}
+
+// addMacroCall adds the macro to the MacroCalls map in source info. If a macro has args/subargs/target
+// that are macros, their ID will be stored instead for later self-lookups.
+func (p *parserHelper) addMacroCall(exprID int64, function string, target ast.Expr, args ...ast.Expr) {
+	macroArgs := make([]ast.Expr, len(args))
+	for index, arg := range args {
+		macroArgs[index] = p.buildMacroCallArg(arg)
+	}
+	if target == nil {
+		p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewCall(0, function, macroArgs...))
+		return
+	}
+	macroTarget := target
+	if _, found := p.sourceInfo.GetMacroCall(target.ID()); found {
+		macroTarget = p.exprFactory.NewUnspecifiedExpr(target.ID())
+	} else {
+		macroTarget = p.buildMacroCallArg(target)
+	}
+	p.sourceInfo.SetMacroCall(exprID, p.exprFactory.NewMemberCall(0, function, macroTarget, macroArgs...))
+}
+
+// logicManager compacts logical trees into a more efficient structure which is semantically
+// equivalent to how the logic graph is constructed by the ANTLR parser.
+//
+// The purpose of the logicManager is to ensure a compact serialization format for the logical &&, ||
+// operators which have a tendency to create long DAGs which are skewed in one direction. Since the
+// operators are commutative, re-ordering the terms *must not* affect the evaluation result.
+//
+// The logic manager will either render the N chained && / || terms as a single logical call with
+// N terms, or will rebalance the tree. Rebalancing the terms is a safe, if somewhat controversial,
+// choice as it alters the traditional order-of-execution assumptions present in most expressions.
+type logicManager struct {
+	exprFactory  ast.ExprFactory
+	function     string
+	terms        []ast.Expr
+	ops          []int64
+	variadicASTs bool
+}
+
+// newVariadicLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newVariadicLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+	return &logicManager{
+		exprFactory:  fac,
+		function:     function,
+		terms:        []ast.Expr{term},
+		ops:          []int64{},
+		variadicASTs: true,
+	}
+}
+
+// newBalancingLogicManager creates a logic manager instance bound to a specific function and its first term.
+func newBalancingLogicManager(fac ast.ExprFactory, function string, term ast.Expr) *logicManager {
+	return &logicManager{
+		exprFactory:  fac,
+		function:     function,
+		terms:        []ast.Expr{term},
+		ops:          []int64{},
+		variadicASTs: false,
+	}
+}
+
+// addTerm adds an operation identifier and term to the set of terms to be balanced.
+func (l *logicManager) addTerm(op int64, term ast.Expr) {
+	l.terms = append(l.terms, term)
+	l.ops = append(l.ops, op)
+}
+
+// toExpr renders the logic graph into an Expr value, either balancing a tree of logical
+// operations or creating a variadic representation of the logical operator.
+func (l *logicManager) toExpr() ast.Expr {
+	if len(l.terms) == 1 {
+		return l.terms[0]
+	}
+	if l.variadicASTs {
+		return l.exprFactory.NewCall(l.ops[0], l.function, l.terms...)
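+		// For illustration (term/op names hypothetical): given terms [a, b, c, d]
+		// and ops [op1, op2, op3] for operators.LogicalOr ("_||_"), the two shapes are:
+		//
+		//	variadic:  _||_(a, b, c, d)              // one call carrying all terms
+		//	balanced:  _||_(_||_(a, b), _||_(c, d))  // balancedTree(0, 2) splits at op2
+		//
+		// Both are semantically equivalent because the operator is commutative; only
+		// the AST shape differs.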
+	}
+	return l.balancedTree(0, len(l.ops)-1)
+}
+
+// balancedTree recursively balances the terms provided to a commutative operator.
+func (l *logicManager) balancedTree(lo, hi int) ast.Expr {
+	mid := (lo + hi + 1) / 2
+
+	var left ast.Expr
+	if mid == lo {
+		left = l.terms[mid]
+	} else {
+		left = l.balancedTree(lo, mid-1)
+	}
+
+	var right ast.Expr
+	if mid == hi {
+		right = l.terms[mid+1]
+	} else {
+		right = l.balancedTree(mid+1, hi)
+	}
+	return l.exprFactory.NewCall(l.ops[mid], l.function, left, right)
+}
+
+type exprHelper struct {
+	*parserHelper
+	id int64
+}
+
+func (e *exprHelper) nextMacroID() int64 {
+	return e.parserHelper.id(e.parserHelper.getLocation(e.id))
+}
+
+// Copy implements the ExprHelper interface method by producing a copy of the input Expr value
+// with a fresh set of numeric identifiers for the Expr and all its descendants.
+func (e *exprHelper) Copy(expr ast.Expr) ast.Expr {
+	offsetRange, _ := e.parserHelper.sourceInfo.GetOffsetRange(expr.ID())
+	copyID := e.parserHelper.newID(offsetRange)
+	switch expr.Kind() {
+	case ast.LiteralKind:
+		return e.exprFactory.NewLiteral(copyID, expr.AsLiteral())
+	case ast.IdentKind:
+		return e.exprFactory.NewIdent(copyID, expr.AsIdent())
+	case ast.SelectKind:
+		sel := expr.AsSelect()
+		op := e.Copy(sel.Operand())
+		if sel.IsTestOnly() {
+			return e.exprFactory.NewPresenceTest(copyID, op, sel.FieldName())
+		}
+		return e.exprFactory.NewSelect(copyID, op, sel.FieldName())
+	case ast.CallKind:
+		call := expr.AsCall()
+		args := call.Args()
+		argsCopy := make([]ast.Expr, len(args))
+		for i, arg := range args {
+			argsCopy[i] = e.Copy(arg)
+		}
+		if !call.IsMemberFunction() {
+			return e.exprFactory.NewCall(copyID, call.FunctionName(), argsCopy...)
+		}
+		return e.exprFactory.NewMemberCall(copyID, call.FunctionName(), e.Copy(call.Target()), argsCopy...)
+	case ast.ListKind:
+		list := expr.AsList()
+		elems := list.Elements()
+		elemsCopy := make([]ast.Expr, len(elems))
+		for i, elem := range elems {
+			elemsCopy[i] = e.Copy(elem)
+		}
+		return e.exprFactory.NewList(copyID, elemsCopy, list.OptionalIndices())
+	case ast.MapKind:
+		m := expr.AsMap()
+		entries := m.Entries()
+		entriesCopy := make([]ast.EntryExpr, len(entries))
+		for i, en := range entries {
+			entry := en.AsMapEntry()
+			entryID := e.nextMacroID()
+			entriesCopy[i] = e.exprFactory.NewMapEntry(entryID,
+				e.Copy(entry.Key()), e.Copy(entry.Value()), entry.IsOptional())
+		}
+		return e.exprFactory.NewMap(copyID, entriesCopy)
+	case ast.StructKind:
+		s := expr.AsStruct()
+		fields := s.Fields()
+		fieldsCopy := make([]ast.EntryExpr, len(fields))
+		for i, f := range fields {
+			field := f.AsStructField()
+			fieldID := e.nextMacroID()
+			fieldsCopy[i] = e.exprFactory.NewStructField(fieldID,
+				field.Name(), e.Copy(field.Value()), field.IsOptional())
+		}
+		return e.exprFactory.NewStruct(copyID, s.TypeName(), fieldsCopy)
+	case ast.ComprehensionKind:
+		compre := expr.AsComprehension()
+		iterRange := e.Copy(compre.IterRange())
+		accuInit := e.Copy(compre.AccuInit())
+		cond := e.Copy(compre.LoopCondition())
+		step := e.Copy(compre.LoopStep())
+		result := e.Copy(compre.Result())
+		return e.exprFactory.NewComprehension(copyID,
+			iterRange, compre.IterVar(), compre.AccuVar(), accuInit, cond, step, result)
+	}
+	return e.exprFactory.NewUnspecifiedExpr(copyID)
+}
+
+// NewLiteral implements the ExprHelper interface method.
+func (e *exprHelper) NewLiteral(value ref.Val) ast.Expr {
+	return e.exprFactory.NewLiteral(e.nextMacroID(), value)
+}
+
+// NewList implements the ExprHelper interface method.
+func (e *exprHelper) NewList(elems ...ast.Expr) ast.Expr {
+	return e.exprFactory.NewList(e.nextMacroID(), elems, []int32{})
+}
+
+// NewMap implements the ExprHelper interface method.
+func (e *exprHelper) NewMap(entries ...ast.EntryExpr) ast.Expr {
+	return e.exprFactory.NewMap(e.nextMacroID(), entries)
+}
+
+// NewMapEntry implements the ExprHelper interface method.
+func (e *exprHelper) NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr {
+	return e.exprFactory.NewMapEntry(e.nextMacroID(), key, val, optional)
+}
+
+// NewStruct implements the ExprHelper interface method.
+func (e *exprHelper) NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr {
+	return e.exprFactory.NewStruct(e.nextMacroID(), typeName, fieldInits)
+}
+
+// NewStructField implements the ExprHelper interface method.
+func (e *exprHelper) NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr {
+	return e.exprFactory.NewStructField(e.nextMacroID(), field, init, optional)
+}
+
+// NewComprehension implements the ExprHelper interface method.
+func (e *exprHelper) NewComprehension(
+	iterRange ast.Expr,
+	iterVar string,
+	accuVar string,
+	accuInit ast.Expr,
+	condition ast.Expr,
+	step ast.Expr,
+	result ast.Expr) ast.Expr {
+	return e.exprFactory.NewComprehension(
+		e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result)
+}
+
+// NewIdent implements the ExprHelper interface method.
+func (e *exprHelper) NewIdent(name string) ast.Expr {
+	return e.exprFactory.NewIdent(e.nextMacroID(), name)
+}
+
+// NewAccuIdent implements the ExprHelper interface method.
+func (e *exprHelper) NewAccuIdent() ast.Expr {
+	return e.exprFactory.NewAccuIdent(e.nextMacroID())
+}
+
+// NewCall implements the ExprHelper interface method.
+func (e *exprHelper) NewCall(function string, args ...ast.Expr) ast.Expr {
+	return e.exprFactory.NewCall(e.nextMacroID(), function, args...)
+}
+
+// NewMemberCall implements the ExprHelper interface method.
+func (e *exprHelper) NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr {
+	return e.exprFactory.NewMemberCall(e.nextMacroID(), function, target, args...)
+}
+
+// NewPresenceTest implements the ExprHelper interface method.
+func (e *exprHelper) NewPresenceTest(operand ast.Expr, field string) ast.Expr {
+	return e.exprFactory.NewPresenceTest(e.nextMacroID(), operand, field)
+}
+
+// NewSelect implements the ExprHelper interface method.
+func (e *exprHelper) NewSelect(operand ast.Expr, field string) ast.Expr {
+	return e.exprFactory.NewSelect(e.nextMacroID(), operand, field)
+}
+
+// OffsetLocation implements the ExprHelper interface method.
+func (e *exprHelper) OffsetLocation(exprID int64) common.Location {
+	return e.parserHelper.sourceInfo.GetStartLocation(exprID)
+}
+
+// NewError associates an error message with a given expression id, populating the source offset location of the error if possible.
+func (e *exprHelper) NewError(exprID int64, message string) *common.Error {
+	return common.NewError(exprID, message, e.OffsetLocation(exprID))
+}
+
+var (
+	// Thread-safe pool of ExprHelper values to minimize alloc overhead of ExprHelper creations.
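+	//
+	// For illustration, a typical get/put pattern at a call site might look like
+	// the following sketch; the surrounding variable names (helper, exprID) are
+	// hypothetical and not part of this file:
+	//
+	//	eh := exprHelperPool.Get().(*exprHelper)
+	//	defer exprHelperPool.Put(eh)
+	//	eh.parserHelper = helper // re-initialize pooled value before use
+	//	eh.id = exprID
+	//
+	// Reusing pooled helpers avoids one allocation per macro expansion.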
+ exprHelperPool = &sync.Pool{ + New: func() any { + return &exprHelper{} + }, + } +) diff --git a/vendor/github.com/google/cel-go/parser/input.go b/vendor/github.com/google/cel-go/parser/input.go new file mode 100644 index 00000000..44792455 --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/input.go @@ -0,0 +1,129 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + antlr "github.com/antlr4-go/antlr/v4" + + "github.com/google/cel-go/common/runes" +) + +type charStream struct { + buf runes.Buffer + pos int + src string +} + +// Consume implements (antlr.CharStream).Consume. +func (c *charStream) Consume() { + if c.pos >= c.buf.Len() { + panic("cannot consume EOF") + } + c.pos++ +} + +// LA implements (antlr.CharStream).LA. +func (c *charStream) LA(offset int) int { + if offset == 0 { + return 0 + } + if offset < 0 { + offset++ + } + pos := c.pos + offset - 1 + if pos < 0 || pos >= c.buf.Len() { + return antlr.TokenEOF + } + return int(c.buf.Get(pos)) +} + +// LT mimics (*antlr.InputStream).LT. +func (c *charStream) LT(offset int) int { + return c.LA(offset) +} + +// Mark implements (antlr.CharStream).Mark. +func (c *charStream) Mark() int { + return -1 +} + +// Release implements (antlr.CharStream).Release. +func (c *charStream) Release(marker int) {} + +// Index implements (antlr.CharStream).Index. +func (c *charStream) Index() int { + return c.pos +} + +// Seek implements (antlr.CharStream).Seek. +func (c *charStream) Seek(index int) { + if index <= c.pos { + c.pos = index + return + } + if index < c.buf.Len() { + c.pos = index + } else { + c.pos = c.buf.Len() + } +} + +// Size implements (antlr.CharStream).Size. +func (c *charStream) Size() int { + return c.buf.Len() +} + +// GetSourceName implements (antlr.CharStream).GetSourceName. +func (c *charStream) GetSourceName() string { + return c.src +} + +// GetText implements (antlr.CharStream).GetText. +func (c *charStream) GetText(start, stop int) string { + if stop >= c.buf.Len() { + stop = c.buf.Len() - 1 + } + if start >= c.buf.Len() { + return "" + } + return c.buf.Slice(start, stop+1) +} + +// GetTextFromTokens implements (antlr.CharStream).GetTextFromTokens. +func (c *charStream) GetTextFromTokens(start, stop antlr.Token) string { + if start != nil && stop != nil { + return c.GetText(start.GetTokenIndex(), stop.GetTokenIndex()) + } + return "" +} + +// GetTextFromInterval implements (antlr.CharStream).GetTextFromInterval. +func (c *charStream) GetTextFromInterval(i antlr.Interval) string { + return c.GetText(i.Start, i.Stop) +} + +// String mimics (*antlr.InputStream).String. 
+func (c *charStream) String() string {
+	return c.buf.Slice(0, c.buf.Len())
+}
+
+var _ antlr.CharStream = &charStream{}
+
+func newCharStream(buf runes.Buffer, desc string) antlr.CharStream {
+	return &charStream{
+		buf: buf,
+		src: desc,
+	}
+}
diff --git a/vendor/github.com/google/cel-go/parser/macro.go b/vendor/github.com/google/cel-go/parser/macro.go
new file mode 100644
index 00000000..c1936b69
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/macro.go
@@ -0,0 +1,402 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// NewGlobalMacro creates a Macro for a global function with the specified arg count.
+func NewGlobalMacro(function string, argCount int, expander MacroExpander) Macro {
+	return &macro{
+		function: function,
+		argCount: argCount,
+		expander: expander}
+}
+
+// NewReceiverMacro creates a Macro for a receiver function matching the specified arg count.
+func NewReceiverMacro(function string, argCount int, expander MacroExpander) Macro {
+	return &macro{
+		function:      function,
+		argCount:      argCount,
+		expander:      expander,
+		receiverStyle: true}
+}
+
+// NewGlobalVarArgMacro creates a Macro for a global function with a variable arg count.
+func NewGlobalVarArgMacro(function string, expander MacroExpander) Macro {
+	return &macro{
+		function:    function,
+		expander:    expander,
+		varArgStyle: true}
+}
+
+// NewReceiverVarArgMacro creates a Macro for a receiver function matching a variable arg count.
+func NewReceiverVarArgMacro(function string, expander MacroExpander) Macro {
+	return &macro{
+		function:      function,
+		expander:      expander,
+		receiverStyle: true,
+		varArgStyle:   true}
+}
+
+// Macro interface for describing the function signature to match and the MacroExpander to apply.
+//
+// Note: when a Macro should apply to multiple overloads (based on arg count) of a given function,
+// a Macro should be created per arg-count.
+type Macro interface {
+	// Function name to match.
+	Function() string
+
+	// ArgCount for the function call.
+	//
+	// When the macro is a var-arg style macro, the return value will be zero, but the MacroKey
+	// will contain a `*` where the arg count would have been.
+	ArgCount() int
+
+	// IsReceiverStyle returns true if the macro matches a receiver style call.
+	IsReceiverStyle() bool
+
+	// MacroKey returns the macro signatures accepted by this macro.
+	//
+	// Format: `<function>:<arg-count>:<receiver-style>`.
+	//
+	// When the macro is a var-arg style macro, the `arg-count` value is represented as a `*`.
+	MacroKey() string
+
+	// Expander returns the MacroExpander to apply when the macro key matches the parsed call
+	// signature.
+	Expander() MacroExpander
+}
+
+// Macro type which declares the function name and arg count expected for the
+// macro, as well as a macro expansion function.
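+//
+// For illustration, MacroKey() values for the built-in macros defined later in
+// this file look like:
+//
+//	has(m.f)        -> "has:1:false"
+//	list.all(x, p)  -> "all:2:true"
+//	list.map(x, fn) -> "map:2:true"
+//
+// and a var-arg style macro substitutes `*` for the arg count, e.g. "fn:*:false"
+// (the name "fn" here is hypothetical).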
+type macro struct {
+	function      string
+	receiverStyle bool
+	varArgStyle   bool
+	argCount      int
+	expander      MacroExpander
+}
+
+// Function returns the macro's function name (i.e. the function whose syntax it mimics).
+func (m *macro) Function() string {
+	return m.function
+}
+
+// ArgCount returns the number of arguments the macro expects.
+func (m *macro) ArgCount() int {
+	return m.argCount
+}
+
+// IsReceiverStyle returns whether the macro is receiver style.
+func (m *macro) IsReceiverStyle() bool {
+	return m.receiverStyle
+}
+
+// Expander implements the Macro interface method.
+func (m *macro) Expander() MacroExpander {
+	return m.expander
+}
+
+// MacroKey implements the Macro interface method.
+func (m *macro) MacroKey() string {
+	if m.varArgStyle {
+		return makeVarArgMacroKey(m.function, m.receiverStyle)
+	}
+	return makeMacroKey(m.function, m.argCount, m.receiverStyle)
+}
+
+func makeMacroKey(name string, args int, receiverStyle bool) string {
+	return fmt.Sprintf("%s:%d:%v", name, args, receiverStyle)
+}
+
+func makeVarArgMacroKey(name string, receiverStyle bool) string {
+	return fmt.Sprintf("%s:*:%v", name, receiverStyle)
+}
+
+// MacroExpander converts a call and its associated arguments into a new CEL abstract syntax tree.
+//
+// If the MacroExpander determines within the implementation that an expansion is not needed, it may return
+// a nil Expr value to indicate a non-match. However, if an expansion is to be performed, but the arguments
+// are not well-formed, the result of the expansion will be an error.
+//
+// The MacroExpander accepts as arguments an ExprHelper as well as the arguments used in the function call,
+// and produces as output an Expr ast node.
+//
+// Note: when the Macro.IsReceiverStyle() method returns true, the target argument will be nil.
+type MacroExpander func(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error)
+
+// ExprHelper assists with the creation of Expr values in a manner which is consistent with the
+// internal semantics and id generation behaviors of the parser and checker libraries.
+type ExprHelper interface {
+	// Copy the input expression with a brand new set of identifiers.
+	Copy(ast.Expr) ast.Expr
+
+	// NewLiteral creates an Expr value for a scalar literal value.
+	NewLiteral(value ref.Val) ast.Expr
+
+	// NewList creates a list literal instruction with an optional set of elements.
+	NewList(elems ...ast.Expr) ast.Expr
+
+	// NewMap creates a CreateStruct instruction for a map where the map is comprised of the
+	// optional set of key, value entries.
+	NewMap(entries ...ast.EntryExpr) ast.Expr
+
+	// NewMapEntry creates a Map Entry for the key, value pair.
+	NewMapEntry(key ast.Expr, val ast.Expr, optional bool) ast.EntryExpr
+
+	// NewStruct creates a struct literal expression with an optional set of field initializers.
+	NewStruct(typeName string, fieldInits ...ast.EntryExpr) ast.Expr
+
+	// NewStructField creates a new struct field initializer from the field name and value.
+	NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr
+
+	// NewComprehension creates a new comprehension instruction.
+	//
+	// - iterRange represents the expression that resolves to a list or map where the elements or
+	//   keys (respectively) will be iterated over.
+	// - iterVar is the iteration variable name.
+	// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+	// - accuInit is the initial expression whose value will be set for the accuVar prior to
+	//   folding.
+	// - condition is the expression to test to determine whether to continue folding.
+	// - step is the expression to evaluate at the conclusion of a single fold iteration.
+	// - result is the computation to evaluate at the conclusion of the fold.
+	//
+	// The accuVar should not shadow variable names that you would like to reference within the
+	// environment in the step and condition expressions. Presently, the name __result__ is commonly
+	// used by built-in macros, but this may change in the future.
+	NewComprehension(iterRange ast.Expr,
+		iterVar string,
+		accuVar string,
+		accuInit ast.Expr,
+		condition ast.Expr,
+		step ast.Expr,
+		result ast.Expr) ast.Expr
+
+	// NewIdent creates an identifier Expr value.
+	NewIdent(name string) ast.Expr
+
+	// NewAccuIdent returns an accumulator identifier for use with comprehension results.
+	NewAccuIdent() ast.Expr
+
+	// NewCall creates a function call Expr value for a global (free) function.
+	NewCall(function string, args ...ast.Expr) ast.Expr
+
+	// NewMemberCall creates a function call Expr value for a receiver-style function.
+	NewMemberCall(function string, target ast.Expr, args ...ast.Expr) ast.Expr
+
+	// NewPresenceTest creates a Select TestOnly Expr value for modelling has() semantics.
+	NewPresenceTest(operand ast.Expr, field string) ast.Expr
+
+	// NewSelect creates a field traversal Expr value.
+	NewSelect(operand ast.Expr, field string) ast.Expr
+
+	// OffsetLocation returns the Location of the expression identifier.
+	OffsetLocation(exprID int64) common.Location
+
+	// NewError associates an error message with a given expression id.
+	NewError(exprID int64, message string) *common.Error
+}
+
+var (
+	// HasMacro expands "has(m.f)" which tests the presence of a field, avoiding the need to
+	// specify the field as a string.
+	HasMacro = NewGlobalMacro(operators.Has, 1, MakeHas)
+
+	// AllMacro expands "range.all(var, predicate)" into a comprehension which ensures that all
+	// elements in the range satisfy the predicate.
+	AllMacro = NewReceiverMacro(operators.All, 2, MakeAll)
+
+	// ExistsMacro expands "range.exists(var, predicate)" into a comprehension which ensures that
+	// some element in the range satisfies the predicate.
+	ExistsMacro = NewReceiverMacro(operators.Exists, 2, MakeExists)
+
+	// ExistsOneMacro expands "range.exists_one(var, predicate)", which is true if for exactly one
+	// element in range the predicate holds.
+	ExistsOneMacro = NewReceiverMacro(operators.ExistsOne, 2, MakeExistsOne)
+
+	// MapMacro expands "range.map(var, function)" into a comprehension which applies the function
+	// to each element in the range to produce a new list.
+	MapMacro = NewReceiverMacro(operators.Map, 2, MakeMap)
+
+	// MapFilterMacro expands "range.map(var, predicate, function)" into a comprehension which
+	// first filters the elements in the range by the predicate, then applies the transform function
+	// to produce a new list.
+	MapFilterMacro = NewReceiverMacro(operators.Map, 3, MakeMap)
+
+	// FilterMacro expands "range.filter(var, predicate)" into a comprehension which filters
+	// elements in the range, producing a new list from the elements that satisfy the predicate.
+	FilterMacro = NewReceiverMacro(operators.Filter, 2, MakeFilter)
+
+	// AllMacros includes the list of all spec-supported macros.
+	AllMacros = []Macro{
+		HasMacro,
+		AllMacro,
+		ExistsMacro,
+		ExistsOneMacro,
+		MapMacro,
+		MapFilterMacro,
+		FilterMacro,
+	}
+
+	// NoMacros list.
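+	//
+	// For illustration, a caller can define its own macro with the constructors at
+	// the top of this file; the "double" macro below is hypothetical:
+	//
+	//	doubleMacro := NewGlobalMacro("double", 1,
+	//		func(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	//			// Expand double(x) into x + x, copying x to obtain fresh expr IDs.
+	//			return eh.NewCall(operators.Add, args[0], eh.Copy(args[0])), nil
+	//		})
+	//
+	// Such a macro would then be registered through the parser's Macros(...) option.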
+	NoMacros = []Macro{}
+)
+
+// AccumulatorName is the traditional variable name assigned to the fold accumulator variable.
+const AccumulatorName = "__result__"
+
+type quantifierKind int
+
+const (
+	quantifierAll quantifierKind = iota
+	quantifierExists
+	quantifierExistsOne
+)
+
+// MakeAll expands the input call arguments into a comprehension that returns true if all of the
+// elements in the range match the predicate expressions:
+// <iterRange>.all(<iterVar>, <predicate>)
+func MakeAll(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	return makeQuantifier(quantifierAll, eh, target, args)
+}
+
+// MakeExists expands the input call arguments into a comprehension that returns true if any of the
+// elements in the range match the predicate expressions:
+// <iterRange>.exists(<iterVar>, <predicate>)
+func MakeExists(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	return makeQuantifier(quantifierExists, eh, target, args)
+}
+
+// MakeExistsOne expands the input call arguments into a comprehension that returns true if exactly
+// one of the elements in the range match the predicate expressions:
+// <iterRange>.exists_one(<iterVar>, <predicate>)
+func MakeExistsOne(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	return makeQuantifier(quantifierExistsOne, eh, target, args)
+}
+
+// MakeMap expands the input call arguments into a comprehension that transforms each element in the
+// input to produce an output list.
+//
+// There are two call patterns supported by map:
+//
+//	<iterRange>.map(<iterVar>, <transform>)
+//	<iterRange>.map(<iterVar>, <predicate>, <transform>)
+//
+// In the second form only iterVar values which return true when provided to the predicate expression
+// are transformed.
+func MakeMap(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	v, found := extractIdent(args[0])
+	if !found {
+		return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+	}
+
+	var fn ast.Expr
+	var filter ast.Expr
+
+	if len(args) == 3 {
+		filter = args[1]
+		fn = args[2]
+	} else {
+		filter = nil
+		fn = args[1]
+	}
+
+	init := eh.NewList()
+	condition := eh.NewLiteral(types.True)
+	step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(fn))
+
+	if filter != nil {
+		step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent())
+	}
+	return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil
+}
+
+// MakeFilter expands the input call arguments into a comprehension which produces a list which contains
+// only elements which match the provided predicate expression:
+// <iterRange>.filter(<iterVar>, <predicate>)
+func MakeFilter(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	v, found := extractIdent(args[0])
+	if !found {
+		return nil, eh.NewError(args[0].ID(), "argument is not an identifier")
+	}
+
+	filter := args[1]
+	init := eh.NewList()
+	condition := eh.NewLiteral(types.True)
+	step := eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewList(args[0]))
+	step = eh.NewCall(operators.Conditional, filter, step, eh.NewAccuIdent())
+	return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, eh.NewAccuIdent()), nil
+}
+
+// MakeHas expands the input call arguments into a presence test, e.g.
+// has(<operand>.field)
+func MakeHas(eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	if args[0].Kind() == ast.SelectKind {
+		s := args[0].AsSelect()
+		return eh.NewPresenceTest(s.Operand(), s.FieldName()), nil
+	}
+	return nil, eh.NewError(args[0].ID(), "invalid argument to has() macro")
+}
+
+func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
+	v, found := extractIdent(args[0])
+	if !found {
+		return nil, eh.NewError(args[0].ID(), "argument must be a simple name")
+	}
+
+	var init ast.Expr
+	var condition ast.Expr
+	var step ast.Expr
+	var result ast.Expr
+	switch kind {
+	case quantifierAll:
+		init = eh.NewLiteral(types.True)
+		condition = eh.NewCall(operators.NotStrictlyFalse, eh.NewAccuIdent())
+		step = eh.NewCall(operators.LogicalAnd, eh.NewAccuIdent(), args[1])
+		result = eh.NewAccuIdent()
+	case quantifierExists:
+		init = eh.NewLiteral(types.False)
+		condition = eh.NewCall(
+			operators.NotStrictlyFalse,
+			eh.NewCall(operators.LogicalNot, eh.NewAccuIdent()))
+		step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1])
+		result = eh.NewAccuIdent()
+	case quantifierExistsOne:
+		init = eh.NewLiteral(types.Int(0))
+		condition = eh.NewLiteral(types.True)
+		step = eh.NewCall(operators.Conditional, args[1],
+			eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))), eh.NewAccuIdent())
+		result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1)))
+	default:
+		return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind))
+	}
+	return eh.NewComprehension(target, v, AccumulatorName, init, condition, step, result), nil
+}
+
+func extractIdent(e ast.Expr) (string, bool) {
+	switch e.Kind() {
+	case ast.IdentKind:
+		return e.AsIdent(), true
+	}
+	return "", false
+}
diff --git a/vendor/github.com/google/cel-go/parser/options.go b/vendor/github.com/google/cel-go/parser/options.go
new file mode 100644
index 00000000..61fc3ade
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/options.go
@@ -0,0 +1,140 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import "fmt"
+
+type options struct {
+	maxRecursionDepth                int
+	errorReportingLimit              int
+	errorRecoveryTokenLookaheadLimit int
+	errorRecoveryLimit               int
+	expressionSizeCodePointLimit     int
+	macros                           map[string]Macro
+	populateMacroCalls               bool
+	enableOptionalSyntax             bool
+	enableVariadicOperatorASTs       bool
+}
+
+// Option configures the behavior of the parser.
+type Option func(*options) error
+
+// MaxRecursionDepth limits the maximum depth the parser will attempt to parse the expression before giving up.
+func MaxRecursionDepth(limit int) Option {
+	return func(opts *options) error {
+		if limit < -1 {
+			return fmt.Errorf("max recursion depth must be greater than or equal to -1: %d", limit)
+		}
+		opts.maxRecursionDepth = limit
+		return nil
+	}
+}
+
+// ErrorRecoveryLookaheadTokenLimit limits the number of lexer tokens that may be considered during error recovery.
+//
+// Error recovery often involves looking ahead in the input to determine if there's a point at which parsing may
+// successfully resume. In some pathological cases, the parser can look through quite a large set of input which
+// in turn generates a lot of back-tracking and performance degradation.
+//
+// The limit must be >= 1, and is recommended to be less than the default of 256.
+func ErrorRecoveryLookaheadTokenLimit(limit int) Option {
+	return func(opts *options) error {
+		if limit < 1 {
+			return fmt.Errorf("error recovery lookahead token limit must be at least 1: %d", limit)
+		}
+		opts.errorRecoveryTokenLookaheadLimit = limit
+		return nil
+	}
+}
+
+// ErrorRecoveryLimit limits the number of attempts the parser will perform to recover from an error.
+func ErrorRecoveryLimit(limit int) Option {
+	return func(opts *options) error {
+		if limit < -1 {
+			return fmt.Errorf("error recovery limit must be greater than or equal to -1: %d", limit)
+		}
+		opts.errorRecoveryLimit = limit
+		return nil
+	}
+}
+
+// ErrorReportingLimit limits the number of syntax error reports before terminating parsing.
+//
+// The limit must be at least 1. If unset, the limit will be 100.
+func ErrorReportingLimit(limit int) Option {
+	return func(opts *options) error {
+		if limit < 1 {
+			return fmt.Errorf("error reporting limit must be at least 1: %d", limit)
+		}
+		opts.errorReportingLimit = limit
+		return nil
+	}
+}
+
+// ExpressionSizeCodePointLimit is an option which limits the maximum code point count of an
+// expression.
+func ExpressionSizeCodePointLimit(expressionSizeCodePointLimit int) Option {
+	return func(opts *options) error {
+		if expressionSizeCodePointLimit < -1 {
+			return fmt.Errorf("expression size code point limit must be greater than or equal to -1: %d", expressionSizeCodePointLimit)
+		}
+		opts.expressionSizeCodePointLimit = expressionSizeCodePointLimit
+		return nil
+	}
+}
+
+// Macros adds the given macros to the parser.
+func Macros(macros ...Macro) Option {
+	return func(opts *options) error {
+		for _, m := range macros {
+			if m != nil {
+				if opts.macros == nil {
+					opts.macros = make(map[string]Macro)
+				}
+				opts.macros[m.MacroKey()] = m
+			}
+		}
+		return nil
+	}
+}
+
+// PopulateMacroCalls ensures that the original call signatures replaced by expanded macros
+// are preserved in the `SourceInfo` of the parse result.
+func PopulateMacroCalls(populateMacroCalls bool) Option {
+	return func(opts *options) error {
+		opts.populateMacroCalls = populateMacroCalls
+		return nil
+	}
+}
+
+// EnableOptionalSyntax enables syntax for optional field and index selection.
+func EnableOptionalSyntax(optionalSyntax bool) Option {
+	return func(opts *options) error {
+		opts.enableOptionalSyntax = optionalSyntax
+		return nil
+	}
+}
+
+// EnableVariadicOperatorASTs enables a compact representation of chained like-kind commutative
+// operators. e.g. `a || b || c || d` -> `call(op='||', args=[a, b, c, d])`
+//
+// The benefit of enabling variadic operator ASTs is a more compact representation of deeply
+// nested logic graphs.
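+//
+// For illustration, several of the options in this file can be combined when
+// constructing a parser; the specific values below are arbitrary examples, not
+// recommendations:
+//
+//	p, err := NewParser(
+//		Macros(AllMacros...),
+//		MaxRecursionDepth(32),
+//		ErrorReportingLimit(10),
+//		EnableVariadicOperatorASTs(true),
+//	)
+//	if err != nil {
+//		// an option rejected its value; p is nil
+//	}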
+func EnableVariadicOperatorASTs(varArgASTs bool) Option {
+	return func(opts *options) error {
+		opts.enableVariadicOperatorASTs = varArgASTs
+		return nil
+	}
+}
diff --git a/vendor/github.com/google/cel-go/parser/parser.go b/vendor/github.com/google/cel-go/parser/parser.go
new file mode 100644
index 00000000..5cbb1767
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/parser.go
@@ -0,0 +1,1012 @@
+// Copyright 2018 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package parser declares an expression parser with support for macro
+// expansion.
+package parser
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	antlr "github.com/antlr4-go/antlr/v4"
+
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/runes"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/parser/gen"
+)
+
+// Parser encapsulates the context necessary to perform parsing for different expressions.
+type Parser struct {
+	options
+}
+
+// NewParser builds and returns a new Parser using the provided options.
+func NewParser(opts ...Option) (*Parser, error) {
+	p := &Parser{}
+	for _, opt := range opts {
+		if err := opt(&p.options); err != nil {
+			return nil, err
+		}
+	}
+	if p.errorReportingLimit == 0 {
+		p.errorReportingLimit = 100
+	}
+	if p.maxRecursionDepth == 0 {
+		p.maxRecursionDepth = 250
+	}
+	if p.maxRecursionDepth == -1 {
+		p.maxRecursionDepth = int((^uint(0)) >> 1)
+	}
+	if p.errorRecoveryTokenLookaheadLimit == 0 {
+		p.errorRecoveryTokenLookaheadLimit = 256
+	}
+	if p.errorRecoveryLimit == 0 {
+		p.errorRecoveryLimit = 30
+	}
+	if p.errorRecoveryLimit == -1 {
+		p.errorRecoveryLimit = int((^uint(0)) >> 1)
+	}
+	if p.expressionSizeCodePointLimit == 0 {
+		p.expressionSizeCodePointLimit = 100_000
+	}
+	if p.expressionSizeCodePointLimit == -1 {
+		p.expressionSizeCodePointLimit = int((^uint(0)) >> 1)
+	}
+	// Bool is false by default, so populateMacroCalls will be false by default
+	return p, nil
+}
+
+// mustNewParser does the work of NewParser and panics if an error occurs.
+//
+// This function is only intended for internal use and is for backwards compatibility in Parse and
+// ParseWithMacros, where we know the options will not result in an error.
+func mustNewParser(opts ...Option) *Parser {
+	p, err := NewParser(opts...)
+	if err != nil {
+		panic(err)
+	}
+	return p
+}
+
+// Parse parses the expression represented by source and returns the result.
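+//
+// For illustration, a minimal end-to-end sketch (the CEL expression is an
+// arbitrary example):
+//
+//	src := common.NewTextSource(`[1, 2, 3].exists(x, x > 2)`)
+//	parsed, errs := p.Parse(src)
+//	if len(errs.GetErrors()) != 0 {
+//		// handle syntax errors; parsed still holds a best-effort AST
+//	}
+//	root := parsed.Expr() // root ast.Expr, with IDs mapped in parsed.SourceInfo()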
+func (p *Parser) Parse(source common.Source) (*ast.AST, *common.Errors) {
+	errs := common.NewErrors(source)
+	fac := ast.NewExprFactory()
+	impl := parser{
+		errors:                           &parseErrors{errs},
+		exprFactory:                      fac,
+		helper:                           newParserHelper(source, fac),
+		macros:                           p.macros,
+		maxRecursionDepth:                p.maxRecursionDepth,
+		errorReportingLimit:              p.errorReportingLimit,
+		errorRecoveryLimit:               p.errorRecoveryLimit,
+		errorRecoveryLookaheadTokenLimit: p.errorRecoveryTokenLookaheadLimit,
+		populateMacroCalls:               p.populateMacroCalls,
+		enableOptionalSyntax:             p.enableOptionalSyntax,
+		enableVariadicOperatorASTs:       p.enableVariadicOperatorASTs,
+	}
+	buf, ok := source.(runes.Buffer)
+	if !ok {
+		buf = runes.NewBuffer(source.Content())
+	}
+	var out ast.Expr
+	if buf.Len() > p.expressionSizeCodePointLimit {
+		out = impl.reportError(common.NoLocation,
+			"expression code point size exceeds limit: size: %d, limit %d",
+			buf.Len(), p.expressionSizeCodePointLimit)
+	} else {
+		out = impl.parse(buf, source.Description())
+	}
+	return ast.NewAST(out, impl.helper.getSourceInfo()), errs
+}
+
+// reservedIds are not legal to use as variables. We exclude them post-parse, as they *are* valid
+// field names for protos, and it would complicate the grammar to distinguish the cases.
+var reservedIds = map[string]struct{}{
+	"as":        {},
+	"break":     {},
+	"const":     {},
+	"continue":  {},
+	"else":      {},
+	"false":     {},
+	"for":       {},
+	"function":  {},
+	"if":        {},
+	"import":    {},
+	"in":        {},
+	"let":       {},
+	"loop":      {},
+	"package":   {},
+	"namespace": {},
+	"null":      {},
+	"return":    {},
+	"true":      {},
+	"var":       {},
+	"void":      {},
+	"while":     {},
+}
+
+// Parse converts a source input into a parsed expression.
+// This function calls ParseWithMacros with AllMacros.
+//
+// Deprecated: Use NewParser().Parse() instead.
+func Parse(source common.Source) (*ast.AST, *common.Errors) {
+	return mustNewParser(Macros(AllMacros...)).Parse(source)
+}
+
+type recursionError struct {
+	message string
+}
+
+// Error implements error.
+func (re *recursionError) Error() string {
+	return re.message
+}
+
+var _ error = &recursionError{}
+
+type recursionListener struct {
+	maxDepth      int
+	ruleTypeDepth map[int]*int
+}
+
+func (rl *recursionListener) VisitTerminal(node antlr.TerminalNode) {}
+
+func (rl *recursionListener) VisitErrorNode(node antlr.ErrorNode) {}
+
+func (rl *recursionListener) EnterEveryRule(ctx antlr.ParserRuleContext) {
+	if ctx == nil {
+		return
+	}
+	ruleIndex := ctx.GetRuleIndex()
+	depth, found := rl.ruleTypeDepth[ruleIndex]
+	if !found {
+		var counter = 1
+		rl.ruleTypeDepth[ruleIndex] = &counter
+		depth = &counter
+	} else {
+		*depth++
+	}
+	if *depth > rl.maxDepth {
+		panic(&recursionError{
+			message: fmt.Sprintf("expression recursion limit exceeded: %d", rl.maxDepth),
+		})
+	}
+}
+
+func (rl *recursionListener) ExitEveryRule(ctx antlr.ParserRuleContext) {
+	if ctx == nil {
+		return
+	}
+	ruleIndex := ctx.GetRuleIndex()
+	if depth, found := rl.ruleTypeDepth[ruleIndex]; found && *depth > 0 {
+		*depth--
+	}
+}
+
+var _ antlr.ParseTreeListener = &recursionListener{}
+
+type tooManyErrors struct {
+	errorReportingLimit int
+}
+
+func (t *tooManyErrors) Error() string {
+	return fmt.Sprintf("More than %d syntax errors", t.errorReportingLimit)
+}
+
+var _ error = &tooManyErrors{}
+
+type recoveryLimitError struct {
+	message string
+}
+
+// Error implements error.
+func (rl *recoveryLimitError) Error() string { + return rl.message +} + +type lookaheadLimitError struct { + message string +} + +func (ll *lookaheadLimitError) Error() string { + return ll.message +} + +var _ error = &recoveryLimitError{} + +type recoveryLimitErrorStrategy struct { + *antlr.DefaultErrorStrategy + errorRecoveryLimit int + errorRecoveryTokenLookaheadLimit int + recoveryAttempts int +} + +type lookaheadConsumer struct { + antlr.Parser + errorRecoveryTokenLookaheadLimit int + lookaheadAttempts int +} + +func (lc *lookaheadConsumer) Consume() antlr.Token { + if lc.lookaheadAttempts >= lc.errorRecoveryTokenLookaheadLimit { + panic(&lookaheadLimitError{ + message: fmt.Sprintf("error recovery token lookahead limit exceeded: %d", lc.errorRecoveryTokenLookaheadLimit), + }) + } + lc.lookaheadAttempts++ + return lc.Parser.Consume() +} + +func (rl *recoveryLimitErrorStrategy) Recover(recognizer antlr.Parser, e antlr.RecognitionException) { + rl.checkAttempts(recognizer) + lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit} + rl.DefaultErrorStrategy.Recover(lc, e) +} + +func (rl *recoveryLimitErrorStrategy) RecoverInline(recognizer antlr.Parser) antlr.Token { + rl.checkAttempts(recognizer) + lc := &lookaheadConsumer{Parser: recognizer, errorRecoveryTokenLookaheadLimit: rl.errorRecoveryTokenLookaheadLimit} + return rl.DefaultErrorStrategy.RecoverInline(lc) +} + +func (rl *recoveryLimitErrorStrategy) checkAttempts(recognizer antlr.Parser) { + if rl.recoveryAttempts == rl.errorRecoveryLimit { + rl.recoveryAttempts++ + msg := fmt.Sprintf("error recovery attempt limit exceeded: %d", rl.errorRecoveryLimit) + recognizer.NotifyErrorListeners(msg, nil, nil) + panic(&recoveryLimitError{ + message: msg, + }) + } + rl.recoveryAttempts++ +} + +var _ antlr.ErrorStrategy = &recoveryLimitErrorStrategy{} + +type parser struct { + gen.BaseCELVisitor + errors *parseErrors + exprFactory ast.ExprFactory + helper *parserHelper + macros map[string]Macro + recursionDepth int + errorReports int + maxRecursionDepth int + errorReportingLimit int + errorRecoveryLimit int + errorRecoveryLookaheadTokenLimit int + populateMacroCalls bool + enableOptionalSyntax bool + enableVariadicOperatorASTs bool +} + +var _ gen.CELVisitor = (*parser)(nil) + +func (p *parser) parse(expr runes.Buffer, desc string) ast.Expr { + lexer := gen.NewCELLexer(newCharStream(expr, desc)) + lexer.RemoveErrorListeners() + lexer.AddErrorListener(p) + + prsr := gen.NewCELParser(antlr.NewCommonTokenStream(lexer, 0)) + prsr.RemoveErrorListeners() + + prsrListener := &recursionListener{ + maxDepth: p.maxRecursionDepth, + ruleTypeDepth: map[int]*int{}, + } + + prsr.AddErrorListener(p) + prsr.AddParseListener(prsrListener) + + prsr.SetErrorHandler(&recoveryLimitErrorStrategy{ + DefaultErrorStrategy: antlr.NewDefaultErrorStrategy(), + errorRecoveryLimit: p.errorRecoveryLimit, + errorRecoveryTokenLookaheadLimit: p.errorRecoveryLookaheadTokenLimit, + }) + + defer func() { + if val := recover(); val != nil { + switch err := val.(type) { + case *lookaheadLimitError: + p.errors.internalError(err.Error()) + case *recursionError: + p.errors.internalError(err.Error()) + case *tooManyErrors: + // do nothing + case *recoveryLimitError: + // do nothing, listeners already notified and error reported. + default: + panic(val) + } + } + }() + + return p.Visit(prsr.Start_()).(ast.Expr) +} + +// Visitor implementations. 
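+
+// For illustration, the dispatch below follows a single pattern: unnest the
+// grammar's wrapper rules, then route each concrete context type to its visitor:
+//
+//	t := unnest(tree)                   // collapse single-child wrapper rules
+//	switch t.(type) { /* VisitXxx */ }  // one case per generated context type
+//
+// The recursive rules (expr, relation, calc, select, member call, index) are
+// bracketed by checkAndIncrementRecursionDepth/decrementRecursionDepth so that
+// deeply nested input fails fast instead of overflowing the stack.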
+func (p *parser) Visit(tree antlr.ParseTree) any {
+	t := unnest(tree)
+	switch tree := t.(type) {
+	case *gen.StartContext:
+		return p.VisitStart(tree)
+	case *gen.ExprContext:
+		p.checkAndIncrementRecursionDepth()
+		out := p.VisitExpr(tree)
+		p.decrementRecursionDepth()
+		return out
+	case *gen.ConditionalAndContext:
+		return p.VisitConditionalAnd(tree)
+	case *gen.ConditionalOrContext:
+		return p.VisitConditionalOr(tree)
+	case *gen.RelationContext:
+		p.checkAndIncrementRecursionDepth()
+		out := p.VisitRelation(tree)
+		p.decrementRecursionDepth()
+		return out
+	case *gen.CalcContext:
+		p.checkAndIncrementRecursionDepth()
+		out := p.VisitCalc(tree)
+		p.decrementRecursionDepth()
+		return out
+	case *gen.LogicalNotContext:
+		return p.VisitLogicalNot(tree)
+	case *gen.IdentOrGlobalCallContext:
+		return p.VisitIdentOrGlobalCall(tree)
+	case *gen.SelectContext:
+		p.checkAndIncrementRecursionDepth()
+		out := p.VisitSelect(tree)
+		p.decrementRecursionDepth()
+		return out
+	case *gen.MemberCallContext:
+		p.checkAndIncrementRecursionDepth()
+		out := p.VisitMemberCall(tree)
+		p.decrementRecursionDepth()
+		return out
+	case *gen.MapInitializerListContext:
+		return p.VisitMapInitializerList(tree)
+	case *gen.NegateContext:
+		return p.VisitNegate(tree)
+	case *gen.IndexContext:
+		p.checkAndIncrementRecursionDepth()
+		out := p.VisitIndex(tree)
+		p.decrementRecursionDepth()
+		return out
+	case *gen.UnaryContext:
+		return p.VisitUnary(tree)
+	case *gen.CreateListContext:
+		return p.VisitCreateList(tree)
+	case *gen.CreateMessageContext:
+		return p.VisitCreateMessage(tree)
+	case *gen.CreateStructContext:
+		return p.VisitCreateStruct(tree)
+	case *gen.IntContext:
+		return p.VisitInt(tree)
+	case *gen.UintContext:
+		return p.VisitUint(tree)
+	case *gen.DoubleContext:
+		return p.VisitDouble(tree)
+	case *gen.StringContext:
+		return p.VisitString(tree)
+	case *gen.BytesContext:
+		return p.VisitBytes(tree)
+	case *gen.BoolFalseContext:
+		return p.VisitBoolFalse(tree)
+	case *gen.BoolTrueContext:
+		return p.VisitBoolTrue(tree)
+	case *gen.NullContext:
+		return p.VisitNull(tree)
+	}
+
+	// Report at least one error if the parser reaches an unknown parse element.
+	// Typically, this happens if the parser has already encountered a syntax error elsewhere.
+	if p.errors.errorCount() == 0 {
+		txt := "<<nil>>"
+		if t != nil {
+			txt = fmt.Sprintf("<<%T>>", t)
+		}
+		return p.reportError(common.NoLocation, "unknown parse element encountered: %s", txt)
+	}
+	return p.helper.newExpr(common.NoLocation)
+
+}
+
+// Visit a parse tree produced by CELParser#start.
+func (p *parser) VisitStart(ctx *gen.StartContext) any {
+	return p.Visit(ctx.Expr())
+}
+
+// Visit a parse tree produced by CELParser#expr.
+func (p *parser) VisitExpr(ctx *gen.ExprContext) any {
+	result := p.Visit(ctx.GetE()).(ast.Expr)
+	if ctx.GetOp() == nil {
+		return result
+	}
+	opID := p.helper.id(ctx.GetOp())
+	ifTrue := p.Visit(ctx.GetE1()).(ast.Expr)
+	ifFalse := p.Visit(ctx.GetE2()).(ast.Expr)
+	return p.globalCallOrMacro(opID, operators.Conditional, result, ifTrue, ifFalse)
+}
+
+// Visit a parse tree produced by CELParser#conditionalOr.
+func (p *parser) VisitConditionalOr(ctx *gen.ConditionalOrContext) any {
+	result := p.Visit(ctx.GetE()).(ast.Expr)
+	l := p.newLogicManager(operators.LogicalOr, result)
+	rest := ctx.GetE1()
+	for i, op := range ctx.GetOps() {
+		if i >= len(rest) {
+			return p.reportError(ctx, "unexpected character, wanted '||'")
+		}
+		next := p.Visit(rest[i]).(ast.Expr)
+		opID := p.helper.id(op)
+		l.addTerm(opID, next)
+	}
+	return l.toExpr()
+}
+
+// Visit a parse tree produced by CELParser#conditionalAnd.
+func (p *parser) VisitConditionalAnd(ctx *gen.ConditionalAndContext) any {
+	result := p.Visit(ctx.GetE()).(ast.Expr)
+	l := p.newLogicManager(operators.LogicalAnd, result)
+	rest := ctx.GetE1()
+	for i, op := range ctx.GetOps() {
+		if i >= len(rest) {
+			return p.reportError(ctx, "unexpected character, wanted '&&'")
+		}
+		next := p.Visit(rest[i]).(ast.Expr)
+		opID := p.helper.id(op)
+		l.addTerm(opID, next)
+	}
+	return l.toExpr()
+}
+
+// Visit a parse tree produced by CELParser#relation.
+func (p *parser) VisitRelation(ctx *gen.RelationContext) any {
+	opText := ""
+	if ctx.GetOp() != nil {
+		opText = ctx.GetOp().GetText()
+	}
+	if op, found := operators.Find(opText); found {
+		lhs := p.Visit(ctx.Relation(0)).(ast.Expr)
+		opID := p.helper.id(ctx.GetOp())
+		rhs := p.Visit(ctx.Relation(1)).(ast.Expr)
+		return p.globalCallOrMacro(opID, op, lhs, rhs)
+	}
+	return p.reportError(ctx, "operator not found")
+}
+
+// Visit a parse tree produced by CELParser#calc.
+func (p *parser) VisitCalc(ctx *gen.CalcContext) any {
+	opText := ""
+	if ctx.GetOp() != nil {
+		opText = ctx.GetOp().GetText()
+	}
+	if op, found := operators.Find(opText); found {
+		lhs := p.Visit(ctx.Calc(0)).(ast.Expr)
+		opID := p.helper.id(ctx.GetOp())
+		rhs := p.Visit(ctx.Calc(1)).(ast.Expr)
+		return p.globalCallOrMacro(opID, op, lhs, rhs)
+	}
+	return p.reportError(ctx, "operator not found")
+}
+
+func (p *parser) VisitUnary(ctx *gen.UnaryContext) any {
+	return p.helper.newLiteralString(ctx, "<<error>>")
+}
+
+// Visit a parse tree produced by CELParser#LogicalNot.
+func (p *parser) VisitLogicalNot(ctx *gen.LogicalNotContext) any {
+	if len(ctx.GetOps())%2 == 0 {
+		return p.Visit(ctx.Member())
+	}
+	opID := p.helper.id(ctx.GetOps()[0])
+	target := p.Visit(ctx.Member()).(ast.Expr)
+	return p.globalCallOrMacro(opID, operators.LogicalNot, target)
+}
+
+func (p *parser) VisitNegate(ctx *gen.NegateContext) any {
+	if len(ctx.GetOps())%2 == 0 {
+		return p.Visit(ctx.Member())
+	}
+	opID := p.helper.id(ctx.GetOps()[0])
+	target := p.Visit(ctx.Member()).(ast.Expr)
+	return p.globalCallOrMacro(opID, operators.Negate, target)
+}
+
+// VisitSelect visits a parse tree produced by CELParser#Select.
+func (p *parser) VisitSelect(ctx *gen.SelectContext) any {
+	operand := p.Visit(ctx.Member()).(ast.Expr)
+	// Handle the error case where no valid identifier is specified.
+	if ctx.GetId() == nil || ctx.GetOp() == nil {
+		return p.helper.newExpr(ctx)
+	}
+	id := ctx.GetId().GetText()
+	if ctx.GetOpt() != nil {
+		if !p.enableOptionalSyntax {
+			return p.reportError(ctx.GetOp(), "unsupported syntax '.?'")
+		}
+		return p.helper.newGlobalCall(
+			ctx.GetOp(),
+			operators.OptSelect,
+			operand,
+			p.helper.newLiteralString(ctx.GetId(), id))
+	}
+	return p.helper.newSelect(ctx.GetOp(), operand, id)
+}
+
+// VisitMemberCall visits a parse tree produced by CELParser#MemberCall.
+func (p *parser) VisitMemberCall(ctx *gen.MemberCallContext) any {
+	operand := p.Visit(ctx.Member()).(ast.Expr)
+	// Handle the error case where no valid identifier is specified.
+	if ctx.GetId() == nil {
+		return p.helper.newExpr(ctx)
+	}
+	id := ctx.GetId().GetText()
+	opID := p.helper.id(ctx.GetOpen())
+	return p.receiverCallOrMacro(opID, id, operand, p.visitExprList(ctx.GetArgs())...)
+}
+
+// Visit a parse tree produced by CELParser#Index.
+func (p *parser) VisitIndex(ctx *gen.IndexContext) any {
+	target := p.Visit(ctx.Member()).(ast.Expr)
+	// Handle the error case where no valid identifier is specified.
+	if ctx.GetOp() == nil {
+		return p.helper.newExpr(ctx)
+	}
+	opID := p.helper.id(ctx.GetOp())
+	index := p.Visit(ctx.GetIndex()).(ast.Expr)
+	operator := operators.Index
+	if ctx.GetOpt() != nil {
+		if !p.enableOptionalSyntax {
+			return p.reportError(ctx.GetOp(), "unsupported syntax '[?'")
+		}
+		operator = operators.OptIndex
+	}
+	return p.globalCallOrMacro(opID, operator, target, index)
+}
+
+// Visit a parse tree produced by CELParser#CreateMessage.
+func (p *parser) VisitCreateMessage(ctx *gen.CreateMessageContext) any {
+	messageName := ""
+	for _, id := range ctx.GetIds() {
+		if len(messageName) != 0 {
+			messageName += "."
+		}
+		messageName += id.GetText()
+	}
+	if ctx.GetLeadingDot() != nil {
+		messageName = "." + messageName
+	}
+	objID := p.helper.id(ctx.GetOp())
+	entries := p.VisitIFieldInitializerList(ctx.GetEntries()).([]ast.EntryExpr)
+	return p.helper.newObject(objID, messageName, entries...)
+}
+
+// Visit a parse tree of field initializers.
+func (p *parser) VisitIFieldInitializerList(ctx gen.IFieldInitializerListContext) any {
+	if ctx == nil || ctx.GetFields() == nil {
+		// This is the result of a syntax error handled elsewhere, return empty.
+		return []ast.EntryExpr{}
+	}
+
+	result := make([]ast.EntryExpr, len(ctx.GetFields()))
+	cols := ctx.GetCols()
+	vals := ctx.GetValues()
+	for i, f := range ctx.GetFields() {
+		if i >= len(cols) || i >= len(vals) {
+			// This is the result of a syntax error detected elsewhere.
+			return []ast.EntryExpr{}
+		}
+		initID := p.helper.id(cols[i])
+		optField := f.(*gen.OptFieldContext)
+		optional := optField.GetOpt() != nil
+		if !p.enableOptionalSyntax && optional {
+			p.reportError(optField, "unsupported syntax '?'")
+			continue
+		}
+		// The field may be empty due to a prior error.
+		id := optField.IDENTIFIER()
+		if id == nil {
+			return []ast.EntryExpr{}
+		}
+		fieldName := id.GetText()
+		value := p.Visit(vals[i]).(ast.Expr)
+		field := p.helper.newObjectField(initID, fieldName, value, optional)
+		result[i] = field
+	}
+	return result
+}
+
+// Visit a parse tree produced by CELParser#IdentOrGlobalCall.
+func (p *parser) VisitIdentOrGlobalCall(ctx *gen.IdentOrGlobalCallContext) any {
+	identName := ""
+	if ctx.GetLeadingDot() != nil {
+		identName = "."
+	}
+	// Handle the error case where no valid identifier is specified.
+	if ctx.GetId() == nil {
+		return p.helper.newExpr(ctx)
+	}
+	// Handle reserved identifiers.
+	id := ctx.GetId().GetText()
+	if _, ok := reservedIds[id]; ok {
+		return p.reportError(ctx, "reserved identifier: %s", id)
+	}
+	identName += id
+	if ctx.GetOp() != nil {
+		opID := p.helper.id(ctx.GetOp())
+		return p.globalCallOrMacro(opID, identName, p.visitExprList(ctx.GetArgs())...)
+	}
+	return p.helper.newIdent(ctx.GetId(), identName)
+}
+
+// Visit a parse tree produced by CELParser#CreateList.
+func (p *parser) VisitCreateList(ctx *gen.CreateListContext) any {
+	listID := p.helper.id(ctx.GetOp())
+	elems, optionals := p.visitListInit(ctx.GetElems())
+	return p.helper.newList(listID, elems, optionals...)
+}
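+
+// Hedged illustration, not vendored code: the AST shape VisitCreateList
+// builds for `[1, ?x]` when optional syntax is enabled. The name
+// exampleListShape and the literal expression IDs are hypothetical.
+func exampleListShape(fac ast.ExprFactory) ast.Expr {
+	one := fac.NewLiteral(1, types.Int(1))
+	x := fac.NewIdent(2, "x")
+	// The final argument records which element indices carried the '?' marker.
+	return fac.NewList(3, []ast.Expr{one, x}, []int32{1})
+}
+
+// Visit a parse tree produced by CELParser#CreateStruct.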
+func (p *parser) VisitCreateStruct(ctx *gen.CreateStructContext) any {
+	structID := p.helper.id(ctx.GetOp())
+	entries := []ast.EntryExpr{}
+	if ctx.GetEntries() != nil {
+		entries = p.Visit(ctx.GetEntries()).([]ast.EntryExpr)
+	}
+	return p.helper.newMap(structID, entries...)
+}
+
+// Visit a parse tree produced by CELParser#mapInitializerList.
+func (p *parser) VisitMapInitializerList(ctx *gen.MapInitializerListContext) any {
+	if ctx == nil || ctx.GetKeys() == nil {
+		// This is the result of a syntax error handled elsewhere, return empty.
+		return []ast.EntryExpr{}
+	}
+
+	result := make([]ast.EntryExpr, len(ctx.GetCols()))
+	keys := ctx.GetKeys()
+	vals := ctx.GetValues()
+	for i, col := range ctx.GetCols() {
+		colID := p.helper.id(col)
+		if i >= len(keys) || i >= len(vals) {
+			// This is the result of a syntax error detected elsewhere.
+			return []ast.EntryExpr{}
+		}
+		optKey := keys[i]
+		optional := optKey.GetOpt() != nil
+		if !p.enableOptionalSyntax && optional {
+			p.reportError(optKey, "unsupported syntax '?'")
+			continue
+		}
+		key := p.Visit(optKey.GetE()).(ast.Expr)
+		value := p.Visit(vals[i]).(ast.Expr)
+		entry := p.helper.newMapEntry(colID, key, value, optional)
+		result[i] = entry
+	}
+	return result
+}
+
+// Visit a parse tree produced by CELParser#Int.
+func (p *parser) VisitInt(ctx *gen.IntContext) any {
+	text := ctx.GetTok().GetText()
+	base := 10
+	if strings.HasPrefix(text, "0x") {
+		base = 16
+		text = text[2:]
+	}
+	if ctx.GetSign() != nil {
+		text = ctx.GetSign().GetText() + text
+	}
+	i, err := strconv.ParseInt(text, base, 64)
+	if err != nil {
+		return p.reportError(ctx, "invalid int literal")
+	}
+	return p.helper.newLiteralInt(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Uint.
+func (p *parser) VisitUint(ctx *gen.UintContext) any {
+	text := ctx.GetTok().GetText()
+	// trim the 'u' designator included in the uint literal.
+	text = text[:len(text)-1]
+	base := 10
+	if strings.HasPrefix(text, "0x") {
+		base = 16
+		text = text[2:]
+	}
+	i, err := strconv.ParseUint(text, base, 64)
+	if err != nil {
+		return p.reportError(ctx, "invalid uint literal")
+	}
+	return p.helper.newLiteralUint(ctx, i)
+}
+
+// Visit a parse tree produced by CELParser#Double.
+func (p *parser) VisitDouble(ctx *gen.DoubleContext) any {
+	txt := ctx.GetTok().GetText()
+	if ctx.GetSign() != nil {
+		txt = ctx.GetSign().GetText() + txt
+	}
+	f, err := strconv.ParseFloat(txt, 64)
+	if err != nil {
+		return p.reportError(ctx, "invalid double literal")
+	}
+	return p.helper.newLiteralDouble(ctx, f)
+}
+
+// Visit a parse tree produced by CELParser#String.
+func (p *parser) VisitString(ctx *gen.StringContext) any {
+	s := p.unquote(ctx, ctx.GetText(), false)
+	return p.helper.newLiteralString(ctx, s)
+}
+
+// Visit a parse tree produced by CELParser#Bytes.
+func (p *parser) VisitBytes(ctx *gen.BytesContext) any {
+	b := []byte(p.unquote(ctx, ctx.GetTok().GetText()[1:], true))
+	return p.helper.newLiteralBytes(ctx, b)
+}
+
+// Visit a parse tree produced by CELParser#BoolTrue.
+func (p *parser) VisitBoolTrue(ctx *gen.BoolTrueContext) any {
+	return p.helper.newLiteralBool(ctx, true)
+}
+
+// Visit a parse tree produced by CELParser#BoolFalse.
+func (p *parser) VisitBoolFalse(ctx *gen.BoolFalseContext) any {
+	return p.helper.newLiteralBool(ctx, false)
+}
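+
+// Hedged sketch, not vendored code: how the numeric literal visitors above
+// decompose their tokens before handing them to strconv. The name
+// exampleLiteralForms is hypothetical.
+func exampleLiteralForms() {
+	// VisitInt parses "-0x1F" by joining the sign with the hex digits.
+	i, _ := strconv.ParseInt("-1F", 16, 64)
+	// VisitUint parses "0x2Au" by trimming the 'u' designator first.
+	u, _ := strconv.ParseUint("2A", 16, 64)
+	fmt.Println(i, u) // -31 42
+}
+
+// Visit a parse tree produced by CELParser#Null.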
+func (p *parser) VisitNull(ctx *gen.NullContext) any { + return p.helper.exprFactory.NewLiteral(p.helper.newID(ctx), types.NullValue) +} + +func (p *parser) visitExprList(ctx gen.IExprListContext) []ast.Expr { + if ctx == nil { + return []ast.Expr{} + } + return p.visitSlice(ctx.GetE()) +} + +func (p *parser) visitListInit(ctx gen.IListInitContext) ([]ast.Expr, []int32) { + if ctx == nil { + return []ast.Expr{}, []int32{} + } + elements := ctx.GetElems() + result := make([]ast.Expr, len(elements)) + optionals := []int32{} + for i, e := range elements { + ex := p.Visit(e.GetE()).(ast.Expr) + if ex == nil { + return []ast.Expr{}, []int32{} + } + result[i] = ex + if e.GetOpt() != nil { + if !p.enableOptionalSyntax { + p.reportError(e.GetOpt(), "unsupported syntax '?'") + continue + } + optionals = append(optionals, int32(i)) + } + } + return result, optionals +} + +func (p *parser) visitSlice(expressions []gen.IExprContext) []ast.Expr { + if expressions == nil { + return []ast.Expr{} + } + result := make([]ast.Expr, len(expressions)) + for i, e := range expressions { + ex := p.Visit(e).(ast.Expr) + result[i] = ex + } + return result +} + +func (p *parser) unquote(ctx any, value string, isBytes bool) string { + text, err := unescape(value, isBytes) + if err != nil { + p.reportError(ctx, "%s", err.Error()) + return value + } + return text +} + +func (p *parser) newLogicManager(function string, term ast.Expr) *logicManager { + if p.enableVariadicOperatorASTs { + return newVariadicLogicManager(p.exprFactory, function, term) + } + return newBalancingLogicManager(p.exprFactory, function, term) +} + +func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr { + var location common.Location + err := p.helper.newExpr(ctx) + switch c := ctx.(type) { + case common.Location: + location = c + case antlr.Token, antlr.ParserRuleContext: + location = p.helper.getLocation(err.ID()) + } + // Provide arguments to the report error. + p.errors.reportErrorAtID(err.ID(), location, format, args...) + return err +} + +// ANTLR Parse listener implementations +func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) { + offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column)) + l := p.helper.getLocationByOffset(offset) + // Hack to keep existing error messages consistent with previous versions of CEL when a reserved word + // is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error + // messages out of ANTLR to prevent future breaking changes related to error message content. + if strings.Contains(msg, "no viable alternative") { + msg = reservedIdentifier.ReplaceAllString(msg, mismatchedReservedIdentifier) + } + // Ensure that no more than 100 syntax errors are reported as this will halt attempts to recover from a + // seriously broken expression. 
+ if p.errorReports < p.errorReportingLimit { + p.errorReports++ + p.errors.syntaxError(l, msg) + } else { + tme := &tooManyErrors{errorReportingLimit: p.errorReportingLimit} + p.errors.syntaxError(l, tme.Error()) + panic(tme) + } +} + +func (p *parser) ReportAmbiguity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, exact bool, ambigAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { + // Intentional +} + +func (p *parser) ReportAttemptingFullContext(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex int, conflictingAlts *antlr.BitSet, configs *antlr.ATNConfigSet) { + // Intentional +} + +func (p *parser) ReportContextSensitivity(recognizer antlr.Parser, dfa *antlr.DFA, startIndex, stopIndex, prediction int, configs *antlr.ATNConfigSet) { + // Intentional +} + +func (p *parser) globalCallOrMacro(exprID int64, function string, args ...ast.Expr) ast.Expr { + if expr, found := p.expandMacro(exprID, function, nil, args...); found { + return expr + } + return p.helper.newGlobalCall(exprID, function, args...) +} + +func (p *parser) receiverCallOrMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) ast.Expr { + if expr, found := p.expandMacro(exprID, function, target, args...); found { + return expr + } + return p.helper.newReceiverCall(exprID, function, target, args...) +} + +func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, args ...ast.Expr) (ast.Expr, bool) { + macro, found := p.macros[makeMacroKey(function, len(args), target != nil)] + if !found { + macro, found = p.macros[makeVarArgMacroKey(function, target != nil)] + if !found { + return nil, false + } + } + eh := exprHelperPool.Get().(*exprHelper) + defer exprHelperPool.Put(eh) + eh.parserHelper = p.helper + eh.id = exprID + expr, err := macro.Expander()(eh, target, args) + // An error indicates that the macro was matched, but the arguments were not well-formed. + if err != nil { + loc := err.Location + if loc == nil { + loc = p.helper.getLocation(exprID) + } + p.helper.deleteID(exprID) + return p.reportError(loc, err.Message), true + } + // A nil value from the macro indicates that the macro implementation decided that + // an expansion should not be performed. + if expr == nil { + return nil, false + } + if p.populateMacroCalls { + p.helper.addMacroCall(expr.ID(), function, target, args...) + } + p.helper.deleteID(exprID) + return expr, true +} + +func (p *parser) checkAndIncrementRecursionDepth() { + p.recursionDepth++ + if p.recursionDepth > p.maxRecursionDepth { + panic(&recursionError{message: "max recursion depth exceeded"}) + } +} + +func (p *parser) decrementRecursionDepth() { + p.recursionDepth-- +} + +// unnest traverses down the left-hand side of the parse graph until it encounters the first compound +// parse node or the first leaf in the parse graph. +func unnest(tree antlr.ParseTree) antlr.ParseTree { + for tree != nil { + switch t := tree.(type) { + case *gen.ExprContext: + // conditionalOr op='?' 
conditionalOr : expr + if t.GetOp() != nil { + return t + } + // conditionalOr + tree = t.GetE() + case *gen.ConditionalOrContext: + // conditionalAnd (ops=|| conditionalAnd)* + if t.GetOps() != nil && len(t.GetOps()) > 0 { + return t + } + // conditionalAnd + tree = t.GetE() + case *gen.ConditionalAndContext: + // relation (ops=&& relation)* + if t.GetOps() != nil && len(t.GetOps()) > 0 { + return t + } + // relation + tree = t.GetE() + case *gen.RelationContext: + // relation op relation + if t.GetOp() != nil { + return t + } + // calc + tree = t.Calc() + case *gen.CalcContext: + // calc op calc + if t.GetOp() != nil { + return t + } + // unary + tree = t.Unary() + case *gen.MemberExprContext: + // member expands to one of: primary, select, index, or create message + tree = t.Member() + case *gen.PrimaryExprContext: + // primary expands to one of identifier, nested, create list, create struct, literal + tree = t.Primary() + case *gen.NestedContext: + // contains a nested 'expr' + tree = t.GetE() + case *gen.ConstantLiteralContext: + // expands to a primitive literal + tree = t.Literal() + default: + return t + } + } + return tree +} + +var ( + reservedIdentifier = regexp.MustCompile("no viable alternative at input '.(true|false|null)'") + mismatchedReservedIdentifier = "mismatched input '$1' expecting IDENTIFIER" +) diff --git a/vendor/github.com/google/cel-go/parser/unescape.go b/vendor/github.com/google/cel-go/parser/unescape.go new file mode 100644 index 00000000..27c57a9f --- /dev/null +++ b/vendor/github.com/google/cel-go/parser/unescape.go @@ -0,0 +1,237 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package parser + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// Unescape takes a quoted string, unquotes, and unescapes it. +// +// This function performs escaping compatible with GoogleSQL. +func unescape(value string, isBytes bool) (string, error) { + // All strings normalize newlines to the \n representation. + value = newlineNormalizer.Replace(value) + n := len(value) + + // Nothing to unescape / decode. + if n < 2 { + return value, fmt.Errorf("unable to unescape string") + } + + // Raw string preceded by the 'r|R' prefix. + isRawLiteral := false + if value[0] == 'r' || value[0] == 'R' { + value = value[1:] + n = len(value) + isRawLiteral = true + } + + // Quoted string of some form, must have same first and last char. + if value[0] != value[n-1] || (value[0] != '"' && value[0] != '\'') { + return value, fmt.Errorf("unable to unescape string") + } + + // Normalize the multi-line CEL string representation to a standard + // Go quoted string. 
+ if n >= 6 { + if strings.HasPrefix(value, "'''") { + if !strings.HasSuffix(value, "'''") { + return value, fmt.Errorf("unable to unescape string") + } + value = "\"" + value[3:n-3] + "\"" + } else if strings.HasPrefix(value, `"""`) { + if !strings.HasSuffix(value, `"""`) { + return value, fmt.Errorf("unable to unescape string") + } + value = "\"" + value[3:n-3] + "\"" + } + n = len(value) + } + value = value[1 : n-1] + // If there is nothing to escape, then return. + if isRawLiteral || !strings.ContainsRune(value, '\\') { + return value, nil + } + + // Otherwise the string contains escape characters. + // The following logic is adapted from `strconv/quote.go` + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*n/2) + for len(value) > 0 { + c, encode, rest, err := unescapeChar(value, isBytes) + if err != nil { + return "", err + } + value = rest + if c < utf8.RuneSelf || !encode { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + } + return string(buf), nil +} + +// unescapeChar takes a string input and returns the following info: +// +// value - the escaped unicode rune at the front of the string. +// encode - the value should be unicode-encoded +// tail - the remainder of the input string. +// err - error value, if the character could not be unescaped. +// +// When encode is true the return value may still fit within a single byte, +// but unicode encoding is attempted which is more expensive than when the +// value is known to self-represent as a single byte. +// +// If isBytes is set, unescape as a bytes literal so octal and hex escapes +// represent byte values, not unicode code points. +func unescapeChar(s string, isBytes bool) (value rune, encode bool, tail string, err error) { + // 1. Character is not an escape sequence. + switch c := s[0]; { + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // 2. Last character is the start of an escape sequence. + if len(s) <= 1 { + err = fmt.Errorf("unable to unescape string, found '\\' as last character") + return + } + + c := s[1] + s = s[2:] + // 3. Common escape sequences shared with Google SQL + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case '\\': + value = '\\' + case '\'': + value = '\'' + case '"': + value = '"' + case '`': + value = '`' + case '?': + value = '?' + + // 4. Unicode escape sequences, reproduced from `strconv/quote.go` + case 'x', 'X', 'u', 'U': + n := 0 + encode = true + switch c { + case 'x', 'X': + n = 2 + encode = !isBytes + case 'u': + n = 4 + if isBytes { + err = fmt.Errorf("unable to unescape string") + return + } + case 'U': + n = 8 + if isBytes { + err = fmt.Errorf("unable to unescape string") + return + } + } + var v rune + if len(s) < n { + err = fmt.Errorf("unable to unescape string") + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = fmt.Errorf("unable to unescape string") + return + } + v = v<<4 | x + } + s = s[n:] + if !isBytes && v > utf8.MaxRune { + err = fmt.Errorf("unable to unescape string") + return + } + value = v + + // 5. 
Octal escape sequences, must be three digits \[0-3][0-7][0-7]
+	case '0', '1', '2', '3':
+		if len(s) < 2 {
+			err = fmt.Errorf("unable to unescape octal sequence in string")
+			return
+		}
+		v := rune(c - '0')
+		for j := 0; j < 2; j++ {
+			x := s[j]
+			if x < '0' || x > '7' {
+				err = fmt.Errorf("unable to unescape octal sequence in string")
+				return
+			}
+			v = v*8 + rune(x-'0')
+		}
+		if !isBytes && v > utf8.MaxRune {
+			err = fmt.Errorf("unable to unescape string")
+			return
+		}
+		value = v
+		s = s[2:]
+		encode = !isBytes
+
+	// Unknown escape sequence.
+	default:
+		err = fmt.Errorf("unable to unescape string")
+	}
+
+	tail = s
+	return
+}
+
+func unhex(b byte) (rune, bool) {
+	c := rune(b)
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0', true
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10, true
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10, true
+	}
+	return 0, false
+}
+
+var (
+	newlineNormalizer = strings.NewReplacer("\r\n", "\n", "\r", "\n")
+)
diff --git a/vendor/github.com/google/cel-go/parser/unparser.go b/vendor/github.com/google/cel-go/parser/unparser.go
new file mode 100644
index 00000000..91cf7294
--- /dev/null
+++ b/vendor/github.com/google/cel-go/parser/unparser.go
@@ -0,0 +1,629 @@
+// Copyright 2019 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package parser
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/operators"
+	"github.com/google/cel-go/common/types"
+)
+
+// Unparse takes an input expression and source position information and generates a human-readable
+// expression.
+//
+// Note, unparsing an AST will often generate the same expression as was originally parsed, but some
+// formatting may be lost in translation, notably:
+//
+// - All quoted literals are double-quoted.
+// - Byte literals are represented as octal escapes (same as Google SQL).
+// - Floating point values are converted to the smallest number of digits needed to represent the value.
+// - Spacing around punctuation marks may be lost.
+// - Parentheses will only be applied when they affect operator precedence.
+//
+// This function optionally takes in one or more UnparserOption to alter the unparsing behavior, such as
+// performing word wrapping on expressions.
+func Unparse(expr ast.Expr, info *ast.SourceInfo, opts ...UnparserOption) (string, error) {
+	unparserOpts := &unparserOption{
+		wrapOnColumn:         defaultWrapOnColumn,
+		wrapAfterColumnLimit: defaultWrapAfterColumnLimit,
+		operatorsToWrapOn:    defaultOperatorsToWrapOn,
+	}
+
+	var err error
+	for _, opt := range opts {
+		unparserOpts, err = opt(unparserOpts)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	un := &unparser{
+		info:    info,
+		options: unparserOpts,
+	}
+	err = un.visit(expr)
+	if err != nil {
+		return "", err
+	}
+	return un.str.String(), nil
+}
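+
+// Illustrative sketch, not part of the vendored library: a hedged example of
+// rendering an expression with Unparse. The variables expr and info are
+// assumed to come from a prior parse of `a || b`; the name exampleUnparse is
+// hypothetical.
+func exampleUnparse(expr ast.Expr, info *ast.SourceInfo) {
+	out, err := Unparse(expr, info)
+	if err != nil {
+		// Unparse fails on expression kinds it does not recognize.
+		return
+	}
+	fmt.Println(out) // prints a human-readable form, e.g. "a || b"
+}
+
+// unparser visits an expression to reconstruct a human-readable string from an AST.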
+type unparser struct { + str strings.Builder + info *ast.SourceInfo + options *unparserOption + lastWrappedIndex int +} + +func (un *unparser) visit(expr ast.Expr) error { + if expr == nil { + return errors.New("unsupported expression") + } + visited, err := un.visitMaybeMacroCall(expr) + if visited || err != nil { + return err + } + switch expr.Kind() { + case ast.CallKind: + return un.visitCall(expr) + case ast.LiteralKind: + return un.visitConst(expr) + case ast.IdentKind: + return un.visitIdent(expr) + case ast.ListKind: + return un.visitList(expr) + case ast.MapKind: + return un.visitStructMap(expr) + case ast.SelectKind: + return un.visitSelect(expr) + case ast.StructKind: + return un.visitStructMsg(expr) + default: + return fmt.Errorf("unsupported expression: %v", expr) + } +} + +func (un *unparser) visitCall(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + switch fun { + // ternary operator + case operators.Conditional: + return un.visitCallConditional(expr) + // optional select operator + case operators.OptSelect: + return un.visitOptSelect(expr) + // index operator + case operators.Index: + return un.visitCallIndex(expr) + // optional index operator + case operators.OptIndex: + return un.visitCallOptIndex(expr) + // unary operators + case operators.LogicalNot, operators.Negate: + return un.visitCallUnary(expr) + // binary operators + case operators.Add, + operators.Divide, + operators.Equals, + operators.Greater, + operators.GreaterEquals, + operators.In, + operators.Less, + operators.LessEquals, + operators.LogicalAnd, + operators.LogicalOr, + operators.Modulo, + operators.Multiply, + operators.NotEquals, + operators.OldIn, + operators.Subtract: + return un.visitCallBinary(expr) + // standard function calls. + default: + return un.visitCallFunc(expr) + } +} + +func (un *unparser) visitCallBinary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + lhs := args[0] + // add parens if the current operator is lower precedence than the lhs expr operator. + lhsParen := isComplexOperatorWithRespectTo(fun, lhs) + rhs := args[1] + // add parens if the current operator is lower precedence than the rhs expr operator, + // or the same precedence and the operator is left recursive. + rhsParen := isComplexOperatorWithRespectTo(fun, rhs) + if !rhsParen && isLeftRecursive(fun) { + rhsParen = isSamePrecedence(fun, rhs) + } + err := un.visitMaybeNested(lhs, lhsParen) + if err != nil { + return err + } + unmangled, found := operators.FindReverseBinaryOperator(fun) + if !found { + return fmt.Errorf("cannot unmangle operator: %s", fun) + } + + un.writeOperatorWithWrapping(fun, unmangled) + return un.visitMaybeNested(rhs, rhsParen) +} + +func (un *unparser) visitCallConditional(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() + // add parens if operand is a conditional itself. + nested := isSamePrecedence(operators.Conditional, args[0]) || + isComplexOperator(args[0]) + err := un.visitMaybeNested(args[0], nested) + if err != nil { + return err + } + un.writeOperatorWithWrapping(operators.Conditional, "?") + + // add parens if operand is a conditional itself. + nested = isSamePrecedence(operators.Conditional, args[1]) || + isComplexOperator(args[1]) + err = un.visitMaybeNested(args[1], nested) + if err != nil { + return err + } + + un.str.WriteString(" : ") + // add parens if operand is a conditional itself. 
+ nested = isSamePrecedence(operators.Conditional, args[2]) || + isComplexOperator(args[2]) + + return un.visitMaybeNested(args[2], nested) +} + +func (un *unparser) visitCallFunc(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + if c.IsMemberFunction() { + nested := isBinaryOrTernaryOperator(c.Target()) + err := un.visitMaybeNested(c.Target(), nested) + if err != nil { + return err + } + un.str.WriteString(".") + } + un.str.WriteString(fun) + un.str.WriteString("(") + for i, arg := range args { + err := un.visit(arg) + if err != nil { + return err + } + if i < len(args)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString(")") + return nil +} + +func (un *unparser) visitCallIndex(expr ast.Expr) error { + return un.visitCallIndexInternal(expr, "[") +} + +func (un *unparser) visitCallOptIndex(expr ast.Expr) error { + return un.visitCallIndexInternal(expr, "[?") +} + +func (un *unparser) visitCallIndexInternal(expr ast.Expr, op string) error { + c := expr.AsCall() + args := c.Args() + nested := isBinaryOrTernaryOperator(args[0]) + err := un.visitMaybeNested(args[0], nested) + if err != nil { + return err + } + un.str.WriteString(op) + err = un.visit(args[1]) + if err != nil { + return err + } + un.str.WriteString("]") + return nil +} + +func (un *unparser) visitCallUnary(expr ast.Expr) error { + c := expr.AsCall() + fun := c.FunctionName() + args := c.Args() + unmangled, found := operators.FindReverse(fun) + if !found { + return fmt.Errorf("cannot unmangle operator: %s", fun) + } + un.str.WriteString(unmangled) + nested := isComplexOperator(args[0]) + return un.visitMaybeNested(args[0], nested) +} + +func (un *unparser) visitConst(expr ast.Expr) error { + val := expr.AsLiteral() + switch val := val.(type) { + case types.Bool: + un.str.WriteString(strconv.FormatBool(bool(val))) + case types.Bytes: + // bytes constants are surrounded with b"" + un.str.WriteString(`b"`) + un.str.WriteString(bytesToOctets([]byte(val))) + un.str.WriteString(`"`) + case types.Double: + // represent the float using the minimum required digits + d := strconv.FormatFloat(float64(val), 'g', -1, 64) + un.str.WriteString(d) + if !strings.Contains(d, ".") { + un.str.WriteString(".0") + } + case types.Int: + i := strconv.FormatInt(int64(val), 10) + un.str.WriteString(i) + case types.Null: + un.str.WriteString("null") + case types.String: + // strings will be double quoted with quotes escaped. + un.str.WriteString(strconv.Quote(string(val))) + case types.Uint: + // uint literals have a 'u' suffix. 
+ ui := strconv.FormatUint(uint64(val), 10) + un.str.WriteString(ui) + un.str.WriteString("u") + default: + return fmt.Errorf("unsupported constant: %v", expr) + } + return nil +} + +func (un *unparser) visitIdent(expr ast.Expr) error { + un.str.WriteString(expr.AsIdent()) + return nil +} + +func (un *unparser) visitList(expr ast.Expr) error { + l := expr.AsList() + elems := l.Elements() + optIndices := make(map[int]bool, len(elems)) + for _, idx := range l.OptionalIndices() { + optIndices[int(idx)] = true + } + un.str.WriteString("[") + for i, elem := range elems { + if optIndices[i] { + un.str.WriteString("?") + } + err := un.visit(elem) + if err != nil { + return err + } + if i < len(elems)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString("]") + return nil +} + +func (un *unparser) visitOptSelect(expr ast.Expr) error { + c := expr.AsCall() + args := c.Args() + operand := args[0] + field := args[1].AsLiteral().(types.String) + return un.visitSelectInternal(operand, false, ".?", string(field)) +} + +func (un *unparser) visitSelect(expr ast.Expr) error { + sel := expr.AsSelect() + return un.visitSelectInternal(sel.Operand(), sel.IsTestOnly(), ".", sel.FieldName()) +} + +func (un *unparser) visitSelectInternal(operand ast.Expr, testOnly bool, op string, field string) error { + // handle the case when the select expression was generated by the has() macro. + if testOnly { + un.str.WriteString("has(") + } + nested := !testOnly && isBinaryOrTernaryOperator(operand) + err := un.visitMaybeNested(operand, nested) + if err != nil { + return err + } + un.str.WriteString(op) + un.str.WriteString(field) + if testOnly { + un.str.WriteString(")") + } + return nil +} + +func (un *unparser) visitStructMsg(expr ast.Expr) error { + m := expr.AsStruct() + fields := m.Fields() + un.str.WriteString(m.TypeName()) + un.str.WriteString("{") + for i, f := range fields { + field := f.AsStructField() + f := field.Name() + if field.IsOptional() { + un.str.WriteString("?") + } + un.str.WriteString(f) + un.str.WriteString(": ") + v := field.Value() + err := un.visit(v) + if err != nil { + return err + } + if i < len(fields)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString("}") + return nil +} + +func (un *unparser) visitStructMap(expr ast.Expr) error { + m := expr.AsMap() + entries := m.Entries() + un.str.WriteString("{") + for i, e := range entries { + entry := e.AsMapEntry() + k := entry.Key() + if entry.IsOptional() { + un.str.WriteString("?") + } + err := un.visit(k) + if err != nil { + return err + } + un.str.WriteString(": ") + v := entry.Value() + err = un.visit(v) + if err != nil { + return err + } + if i < len(entries)-1 { + un.str.WriteString(", ") + } + } + un.str.WriteString("}") + return nil +} + +func (un *unparser) visitMaybeMacroCall(expr ast.Expr) (bool, error) { + call, found := un.info.GetMacroCall(expr.ID()) + if !found { + return false, nil + } + return true, un.visit(call) +} + +func (un *unparser) visitMaybeNested(expr ast.Expr, nested bool) error { + if nested { + un.str.WriteString("(") + } + err := un.visit(expr) + if err != nil { + return err + } + if nested { + un.str.WriteString(")") + } + return nil +} + +// isLeftRecursive indicates whether the parser resolves the call in a left-recursive manner as +// this can have an effect of how parentheses affect the order of operations in the AST. 
+func isLeftRecursive(op string) bool { + return op != operators.LogicalAnd && op != operators.LogicalOr +} + +// isSamePrecedence indicates whether the precedence of the input operator is the same as the +// precedence of the (possible) operation represented in the input Expr. +// +// If the expr is not a Call, the result is false. +func isSamePrecedence(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind { + return false + } + c := expr.AsCall() + other := c.FunctionName() + return operators.Precedence(op) == operators.Precedence(other) +} + +// isLowerPrecedence indicates whether the precedence of the input operator is lower precedence +// than the (possible) operation represented in the input Expr. +// +// If the expr is not a Call, the result is false. +func isLowerPrecedence(op string, expr ast.Expr) bool { + c := expr.AsCall() + other := c.FunctionName() + return operators.Precedence(op) < operators.Precedence(other) +} + +// Indicates whether the expr is a complex operator, i.e., a call expression +// with 2 or more arguments. +func isComplexOperator(expr ast.Expr) bool { + if expr.Kind() == ast.CallKind && len(expr.AsCall().Args()) >= 2 { + return true + } + return false +} + +// Indicates whether it is a complex operation compared to another. +// expr is *not* considered complex if it is not a call expression or has +// less than two arguments, or if it has a higher precedence than op. +func isComplexOperatorWithRespectTo(op string, expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { + return false + } + return isLowerPrecedence(op, expr) +} + +// Indicate whether this is a binary or ternary operator. +func isBinaryOrTernaryOperator(expr ast.Expr) bool { + if expr.Kind() != ast.CallKind || len(expr.AsCall().Args()) < 2 { + return false + } + _, isBinaryOp := operators.FindReverseBinaryOperator(expr.AsCall().FunctionName()) + return isBinaryOp || isSamePrecedence(operators.Conditional, expr) +} + +// bytesToOctets converts byte sequences to a string using a three digit octal encoded value +// per byte. +func bytesToOctets(byteVal []byte) string { + var b strings.Builder + for _, c := range byteVal { + fmt.Fprintf(&b, "\\%03o", c) + } + return b.String() +} + +// writeOperatorWithWrapping outputs the operator and inserts a newline for operators configured +// in the unparser options. 
+func (un *unparser) writeOperatorWithWrapping(fun string, unmangled string) bool { + _, wrapOperatorExists := un.options.operatorsToWrapOn[fun] + lineLength := un.str.Len() - un.lastWrappedIndex + len(fun) + + if wrapOperatorExists && lineLength >= un.options.wrapOnColumn { + un.lastWrappedIndex = un.str.Len() + // wrapAfterColumnLimit flag dictates whether the newline is placed + // before or after the operator + if un.options.wrapAfterColumnLimit { + // Input: a && b + // Output: a &&\nb + un.str.WriteString(" ") + un.str.WriteString(unmangled) + un.str.WriteString("\n") + } else { + // Input: a && b + // Output: a\n&& b + un.str.WriteString("\n") + un.str.WriteString(unmangled) + un.str.WriteString(" ") + } + return true + } + un.str.WriteString(" ") + un.str.WriteString(unmangled) + un.str.WriteString(" ") + return false +} + +// Defined defaults for the unparser options +var ( + defaultWrapOnColumn = 80 + defaultWrapAfterColumnLimit = true + defaultOperatorsToWrapOn = map[string]bool{ + operators.LogicalAnd: true, + operators.LogicalOr: true, + } +) + +// UnparserOption is a functional option for configuring the output formatting +// of the Unparse function. +type UnparserOption func(*unparserOption) (*unparserOption, error) + +// Internal representation of the UnparserOption type +type unparserOption struct { + wrapOnColumn int + operatorsToWrapOn map[string]bool + wrapAfterColumnLimit bool +} + +// WrapOnColumn wraps the output expression when its string length exceeds a specified limit +// for operators set by WrapOnOperators function or by default, "&&" and "||" will be wrapped. +// +// Example usage: +// +// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd)) +// +// This will insert a newline immediately after the logical AND operator for the below example input: +// +// Input: +// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m') +// +// Output: +// 'my-principal-group' in request.auth.claims && +// request.auth.claims.iat > now - duration('5m') +func WrapOnColumn(col int) UnparserOption { + return func(opt *unparserOption) (*unparserOption, error) { + if col < 1 { + return nil, fmt.Errorf("Invalid unparser option. Wrap column value must be greater than or equal to 1. Got %v instead", col) + } + opt.wrapOnColumn = col + return opt, nil + } +} + +// WrapOnOperators specifies which operators to perform word wrapping on an output expression when its string length +// exceeds the column limit set by WrapOnColumn function. +// +// Word wrapping is supported on non-unary symbolic operators. Refer to operators.go for the full list +// +// This will replace any previously supplied operators instead of merging them. +func WrapOnOperators(symbols ...string) UnparserOption { + return func(opt *unparserOption) (*unparserOption, error) { + opt.operatorsToWrapOn = make(map[string]bool) + for _, symbol := range symbols { + _, found := operators.FindReverse(symbol) + if !found { + return nil, fmt.Errorf("Invalid unparser option. Unsupported operator: %s", symbol) + } + arity := operators.Arity(symbol) + if arity < 2 { + return nil, fmt.Errorf("Invalid unparser option. Unary operators are unsupported: %s", symbol) + } + + opt.operatorsToWrapOn[symbol] = true + } + + return opt, nil + } +} + +// WrapAfterColumnLimit dictates whether to insert a newline before or after the specified operator +// when word wrapping is performed. 
+// +// Example usage: +// +// Unparse(expr, sourceInfo, WrapOnColumn(40), WrapOnOperators(Operators.LogicalAnd), WrapAfterColumnLimit(false)) +// +// This will insert a newline immediately before the logical AND operator for the below example input, ensuring +// that the length of a line never exceeds the specified column limit: +// +// Input: +// 'my-principal-group' in request.auth.claims && request.auth.claims.iat > now - duration('5m') +// +// Output: +// 'my-principal-group' in request.auth.claims +// && request.auth.claims.iat > now - duration('5m') +func WrapAfterColumnLimit(wrapAfter bool) UnparserOption { + return func(opt *unparserOption) (*unparserOption, error) { + opt.wrapAfterColumnLimit = wrapAfter + return opt, nil + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE new file mode 100644 index 00000000..36451625 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
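As a hedged usage sketch (not part of this patch), the unparser options defined in unparser.go above compose as follows; expr and info are assumed to come from a prior parse, and operators refers to github.com/google/cel-go/common/operators:

	out, err := parser.Unparse(expr, info,
		parser.WrapOnColumn(40),
		parser.WrapOnOperators(operators.LogicalAnd),
		parser.WrapAfterColumnLimit(false))
	// err is non-nil when an option is invalid (a column below 1, or a unary
	// operator passed to WrapOnOperators); otherwise long '&&' chains wrap
	// with the operator leading each new line.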
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 00000000..b8fbb2b7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@org_golang_google_grpc//grpclog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go new file mode 100644 index 00000000..3cd93729 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go @@ -0,0 +1,121 @@ +package httprule + +import ( + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +) + +const ( + opcodeVersion = 1 +) + +// Template is a compiled representation of path templates. +type Template struct { + // Version is the version number of the format. + Version int + // OpCodes is a sequence of operations. + OpCodes []int + // Pool is a constant pool + Pool []string + // Verb is a VERB part in the template. + Verb string + // Fields is a list of field paths bound in this template. + Fields []string + // Original template (example: /v1/a_bit_of_everything) + Template string +} + +// Compiler compiles utilities representation of path templates into marshallable operations. +// They can be unmarshalled by runtime.NewPattern. +type Compiler interface { + Compile() Template +} + +type op struct { + // code is the opcode of the operation + code utilities.OpCode + + // str is a string operand of the code. + // num is ignored if str is not empty. + str string + + // num is a numeric operand of the code. + num int +} + +func (w wildcard) compile() []op { + return []op{ + {code: utilities.OpPush}, + } +} + +func (w deepWildcard) compile() []op { + return []op{ + {code: utilities.OpPushM}, + } +} + +func (l literal) compile() []op { + return []op{ + { + code: utilities.OpLitPush, + str: string(l), + }, + } +} + +func (v variable) compile() []op { + var ops []op + for _, s := range v.segments { + ops = append(ops, s.compile()...) + } + ops = append(ops, op{ + code: utilities.OpConcatN, + num: len(v.segments), + }, op{ + code: utilities.OpCapture, + str: v.path, + }) + + return ops +} + +func (t template) Compile() Template { + var rawOps []op + for _, s := range t.segments { + rawOps = append(rawOps, s.compile()...) 
+ } + + var ( + ops []int + pool []string + fields []string + ) + consts := make(map[string]int) + for _, op := range rawOps { + ops = append(ops, int(op.code)) + if op.str == "" { + ops = append(ops, op.num) + } else { + // eof segment literal represents the "/" path pattern + if op.str == eof { + op.str = "" + } + if _, ok := consts[op.str]; !ok { + consts[op.str] = len(pool) + pool = append(pool, op.str) + } + ops = append(ops, consts[op.str]) + } + if op.code == utilities.OpCapture { + fields = append(fields, op.str) + } + } + return Template{ + Version: opcodeVersion, + OpCodes: ops, + Pool: pool, + Verb: t.verb, + Fields: fields, + Template: t.template, + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go new file mode 100644 index 00000000..c056bd30 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go @@ -0,0 +1,11 @@ +//go:build gofuzz +// +build gofuzz + +package httprule + +func Fuzz(data []byte) int { + if _, err := Parse(string(data)); err != nil { + return 0 + } + return 0 +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go new file mode 100644 index 00000000..65ffcf5c --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go @@ -0,0 +1,368 @@ +package httprule + +import ( + "errors" + "fmt" + "strings" +) + +// InvalidTemplateError indicates that the path template is not valid. +type InvalidTemplateError struct { + tmpl string + msg string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.tmpl) +} + +// Parse parses the string representation of path template +func Parse(tmpl string) (Compiler, error) { + if !strings.HasPrefix(tmpl, "/") { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"} + } + tokens, verb := tokenize(tmpl[1:]) + + p := parser{tokens: tokens} + segs, err := p.topLevelSegments() + if err != nil { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()} + } + + return template{ + segments: segs, + verb: verb, + template: tmpl, + }, nil +} + +func tokenize(path string) (tokens []string, verb string) { + if path == "" { + return []string{eof}, "" + } + + const ( + init = iota + field + nested + ) + st := init + for path != "" { + var idx int + switch st { + case init: + idx = strings.IndexAny(path, "/{") + case field: + idx = strings.IndexAny(path, ".=}") + case nested: + idx = strings.IndexAny(path, "/}") + } + if idx < 0 { + tokens = append(tokens, path) + break + } + switch r := path[idx]; r { + case '/', '.': + case '{': + st = field + case '=': + st = nested + case '}': + st = init + } + if idx == 0 { + tokens = append(tokens, path[idx:idx+1]) + } else { + tokens = append(tokens, path[:idx], path[idx:idx+1]) + } + path = path[idx+1:] + } + + l := len(tokens) + // See + // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ; + // although normal and backwards-compat logic here is to use the last index + // of a colon, if the final segment is a variable followed by a colon, the + // part following the colon must be a verb. Hence if the previous token is + // an end var marker, we switch the index we're looking for to Index instead + // of LastIndex, so that we correctly grab the remaining part of the path as + // the verb. 
+ var penultimateTokenIsEndVar bool + switch l { + case 0, 1: + // Not enough to be variable so skip this logic and don't result in an + // invalid index + default: + penultimateTokenIsEndVar = tokens[l-2] == "}" + } + t := tokens[l-1] + var idx int + if penultimateTokenIsEndVar { + idx = strings.Index(t, ":") + } else { + idx = strings.LastIndex(t, ":") + } + if idx == 0 { + tokens, verb = tokens[:l-1], t[1:] + } else if idx > 0 { + tokens[l-1], verb = t[:idx], t[idx+1:] + } + tokens = append(tokens, eof) + return tokens, verb +} + +// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto. +type parser struct { + tokens []string + accepted []string +} + +// topLevelSegments is the target of this parser. +func (p *parser) topLevelSegments() ([]segment, error) { + if _, err := p.accept(typeEOF); err == nil { + p.tokens = p.tokens[:0] + return []segment{literal(eof)}, nil + } + segs, err := p.segments() + if err != nil { + return nil, err + } + if _, err := p.accept(typeEOF); err != nil { + return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, "")) + } + return segs, nil +} + +func (p *parser) segments() ([]segment, error) { + s, err := p.segment() + if err != nil { + return nil, err + } + + segs := []segment{s} + for { + if _, err := p.accept("/"); err != nil { + return segs, nil + } + s, err := p.segment() + if err != nil { + return segs, err + } + segs = append(segs, s) + } +} + +func (p *parser) segment() (segment, error) { + if _, err := p.accept("*"); err == nil { + return wildcard{}, nil + } + if _, err := p.accept("**"); err == nil { + return deepWildcard{}, nil + } + if l, err := p.literal(); err == nil { + return l, nil + } + + v, err := p.variable() + if err != nil { + return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err) + } + return v, nil +} + +func (p *parser) literal() (segment, error) { + lit, err := p.accept(typeLiteral) + if err != nil { + return nil, err + } + return literal(lit), nil +} + +func (p *parser) variable() (segment, error) { + if _, err := p.accept("{"); err != nil { + return nil, err + } + + path, err := p.fieldPath() + if err != nil { + return nil, err + } + + var segs []segment + if _, err := p.accept("="); err == nil { + segs, err = p.segments() + if err != nil { + return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err) + } + } else { + segs = []segment{wildcard{}} + } + + if _, err := p.accept("}"); err != nil { + return nil, fmt.Errorf("unterminated variable segment: %s", path) + } + return variable{ + path: path, + segments: segs, + }, nil +} + +func (p *parser) fieldPath() (string, error) { + c, err := p.accept(typeIdent) + if err != nil { + return "", err + } + components := []string{c} + for { + if _, err := p.accept("."); err != nil { + return strings.Join(components, "."), nil + } + c, err := p.accept(typeIdent) + if err != nil { + return "", fmt.Errorf("invalid field path component: %w", err) + } + components = append(components, c) + } +} + +// A termType is a type of terminal symbols. +type termType string + +// These constants define some of valid values of termType. +// They improve readability of parse functions. +// +// You can also use "/", "*", "**", "." or "=" as valid values. +const ( + typeIdent = termType("ident") + typeLiteral = termType("literal") + typeEOF = termType("$") +) + +// eof is the terminal symbol which always appears at the end of token sequence. 
+const eof = "\u0000"
+
+// accept tries to accept a token in "p".
+// This function consumes a token and returns it if it matches the specified "term".
+// If it doesn't match, the function does not consume any tokens and returns an error.
+func (p *parser) accept(term termType) (string, error) {
+	t := p.tokens[0]
+	switch term {
+	case "/", "*", "**", ".", "=", "{", "}":
+		if t != string(term) {
+			return "", fmt.Errorf("expected %q but got %q", term, t)
+		}
+	case typeEOF:
+		if t != eof {
+			return "", fmt.Errorf("expected EOF but got %q", t)
+		}
+	case typeIdent:
+		if err := expectIdent(t); err != nil {
+			return "", err
+		}
+	case typeLiteral:
+		if err := expectPChars(t); err != nil {
+			return "", err
+		}
+	default:
+		return "", fmt.Errorf("unknown termType %q", term)
+	}
+	p.tokens = p.tokens[1:]
+	p.accepted = append(p.accepted, t)
+	return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//
+//	pchar       = unreserved / pct-encoded / sub-delims / ":" / "@"
+//	unreserved  = ALPHA / DIGIT / "-" / "." / "_" / "~"
+//	sub-delims  = "!" / "$" / "&" / "'" / "(" / ")"
+//	            / "*" / "+" / "," / ";" / "="
+//	pct-encoded = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+	const (
+		init = iota
+		pct1
+		pct2
+	)
+	st := init
+	for _, r := range t {
+		if st != init {
+			if !isHexDigit(r) {
+				return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+			}
+			switch st {
+			case pct1:
+				st = pct2
+			case pct2:
+				st = init
+			}
+			continue
+		}
+
+		// unreserved
+		switch {
+		case 'A' <= r && r <= 'Z':
+			continue
+		case 'a' <= r && r <= 'z':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		}
+		switch r {
+		case '-', '.', '_', '~':
+			// unreserved
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+			// sub-delims
+		case ':', '@':
+			// rest of pchar
+		case '%':
+			// pct-encoded
+			st = pct1
+		default:
+			return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+		}
+	}
+	if st != init {
+		return fmt.Errorf("invalid percent-encoding in %q", t)
+	}
+	return nil
+}
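+
+// Hedged sketch, not part of the vendored library: what tokenize (defined
+// above in this file) produces for a template with a nested variable and a
+// verb. The name exampleTokenize is hypothetical.
+func exampleTokenize() {
+	tokens, verb := tokenize("v1/{name=books/*}:get")
+	// tokens: "v1" "/" "{" "name" "=" "books" "/" "*" "}" followed by eof;
+	// verb: "get", split on the ':' that follows the closing '}'.
+	fmt.Println(tokens, verb)
+}
+
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).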
+func expectIdent(ident string) error { + if ident == "" { + return errors.New("empty identifier") + } + for pos, r := range ident { + switch { + case '0' <= r && r <= '9': + if pos == 0 { + return fmt.Errorf("identifier starting with digit: %s", ident) + } + continue + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case r == '_': + continue + default: + return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident) + } + } + return nil +} + +func isHexDigit(r rune) bool { + switch { + case '0' <= r && r <= '9': + return true + case 'A' <= r && r <= 'F': + return true + case 'a' <= r && r <= 'f': + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go new file mode 100644 index 00000000..5a814a00 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go @@ -0,0 +1,60 @@ +package httprule + +import ( + "fmt" + "strings" +) + +type template struct { + segments []segment + verb string + template string +} + +type segment interface { + fmt.Stringer + compile() (ops []op) +} + +type wildcard struct{} + +type deepWildcard struct{} + +type literal string + +type variable struct { + path string + segments []segment +} + +func (wildcard) String() string { + return "*" +} + +func (deepWildcard) String() string { + return "**" +} + +func (l literal) String() string { + return string(l) +} + +func (v variable) String() string { + var segs []string + for _, s := range v.segments { + segs = append(segs, s.String()) + } + return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/")) +} + +func (t template) String() string { + var segs []string + for _, s := range t.segments { + segs = append(segs, s.String()) + } + str := strings.Join(segs, "/") + if t.verb != "" { + str = fmt.Sprintf("%s:%s", str, t.verb) + } + return "/" + str +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 00000000..a65d88eb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,97 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + 
"@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + name = "runtime_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_internal_test.go", + "mux_test.go", + "pattern_test.go", + "query_fuzz_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto_googleapis_rpc//errdetails", + "@org_golang_google_genproto_googleapis_rpc//status", + "@org_golang_google_grpc//:grpc", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//health/grpc_health_v1", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/fieldmaskpb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go new file mode 100644 index 00000000..2f2b3424 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -0,0 +1,417 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound +// header isn't present. If the value is 0 the sent `context` will not have a timeout. +var DefaultContextTimeout = 0 * time.Second + +// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed. +// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context. 
+var malformedHTTPHeaders = map[string]struct{}{ + "connection": {}, +} + +type ( + rpcMethodKey struct{} + httpPathPatternKey struct{} + httpPatternKey struct{} + + AnnotateContextOption func(ctx context.Context) context.Context +) + +func WithHTTPPathPattern(pattern string) AnnotateContextOption { + return func(ctx context.Context) context.Context { + return withHTTPPathPattern(ctx, pattern) + } +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func isValidGRPCMetadataKey(key string) bool { + // Must be a valid gRPC "Header-Name" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means 0-9 a-z _ - . + // Only lowercase letters are valid in the wire protocol, but the client library will normalize + // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable. + bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode. + for _, ch := range bytes { + validLowercaseLetter := ch >= 'a' && ch <= 'z' + validUppercaseLetter := ch >= 'A' && ch <= 'Z' + validDigit := ch >= '0' && ch <= '9' + validOther := ch == '.' || ch == '-' || ch == '_' + if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther { + return false + } + } + return true +} + +func isValidGRPCMetadataTextValue(textValue string) bool { + // Must be a valid gRPC "ASCII-Value" as defined here: + // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md + // This means printable ASCII (including/plus spaces); 0x20 to 0x7E inclusive. + bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode. 
+ for _, ch := range bytes { + if ch < 0x20 || ch > 0x7E { + return false + } + } + return true +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + var pairs []string + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + switch key { + case xForwardedFor, xForwardedHost: + // Handled separately below + continue + } + + for _, val := range vals { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + if !isValidGRPCMetadataKey(h) { + grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h) + continue + } + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } else if !isValidGRPCMetadataTextValue(val) { + grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h) + continue + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + xff := req.Header.Values(xForwardedFor) + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + xff = append(xff, remoteIP) + } + } + if len(xff) > 0 { + pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", ")) + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + if ctx == nil { + return md, false + } + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +// ServerTransportStream implements grpc.ServerTransportStream. +// It should only be used by the generated files to support grpc.SendHeader +// outside of gRPC server use. 
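+//
+// A minimal usage sketch (assuming "ctx" is a context.Context and the service
+// implementation is invoked directly; names are illustrative only):
+//
+//	stream := &ServerTransportStream{}
+//	ctx = grpc.NewContextWithServerTransportStream(ctx, stream)
+//	resp, err := server.Echo(ctx, req) // may call grpc.SetHeader / grpc.SetTrailer
+//	hdr := stream.Header()             // headers recorded during the call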
+type ServerTransportStream struct { + mu sync.Mutex + header metadata.MD + trailer metadata.MD +} + +// Method returns the method for the stream. +func (s *ServerTransportStream) Method() string { + return "" +} + +// Header returns the header metadata of the stream. +func (s *ServerTransportStream) Header() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.header.Copy() +} + +// SetHeader sets the header metadata. +func (s *ServerTransportStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SendHeader sets the header metadata. +func (s *ServerTransportStream) SendHeader(md metadata.MD) error { + return s.SetHeader(md) +} + +// Trailer returns the cached trailer metadata. +func (s *ServerTransportStream) Trailer() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.trailer.Copy() +} + +// SetTrailer sets the trailer metadata. +func (s *ServerTransportStream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.mu.Unlock() + return nil +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + return + } +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permanent request headers maintained by IANA. +// http://www.iana.org/assignments/message-headers/message-headers.xml +func isPermanentHTTPHeader(hdr string) bool { + switch hdr { + case + "Accept", + "Accept-Charset", + "Accept-Language", + "Accept-Ranges", + "Authorization", + "Cache-Control", + "Content-Type", + "Cookie", + "Date", + "Expect", + "From", + "Host", + "If-Match", + "If-Modified-Since", + "If-None-Match", + "If-Schedule-Tag-Match", + "If-Unmodified-Since", + "Max-Forwards", + "Origin", + "Pragma", + "Referer", + "User-Agent", + "Via", + "Warning": + return true + } + return false +} + +// isMalformedHTTPHeader checks whether header belongs to the list of +// "malformed headers" and would be rejected by the gRPC server. +func isMalformedHTTPHeader(header string) bool { + _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)] + return isMalformed +} + +// RPCMethod returns the method string for the server context. The returned +// string is in the format of "/package.service/method". +func RPCMethod(ctx context.Context) (string, bool) { + m := ctx.Value(rpcMethodKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context { + return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName) +} + +// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists. 
+// The format of the returned string is defined by the google.api.http path template type.
+func HTTPPathPattern(ctx context.Context) (string, bool) {
+	m := ctx.Value(httpPathPatternKey{})
+	if m == nil {
+		return "", false
+	}
+	ms, ok := m.(string)
+	if !ok {
+		return "", false
+	}
+	return ms, true
+}
+
+func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
+	return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
+}
+
+// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists.
+func HTTPPattern(ctx context.Context) (Pattern, bool) {
+	v, ok := ctx.Value(httpPatternKey{}).(Pattern)
+	return v, ok
+}
+
+func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context {
+	return context.WithValue(ctx, httpPatternKey{}, httpPattern)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
new file mode 100644
index 00000000..d7b15fcf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
+	"google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String just returns the given string.
+// It exists only for compatibility with the other conversion functions.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
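+//
+// A minimal usage sketch (the same pattern applies to the other *Slice helpers):
+//
+//	vals, err := Float32Slice("1.5,2.5", ",") // []float32{1.5, 2.5}, nil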
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// A byte sequence is encoded in URL-safe base64 without padding.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in URL-safe
+// base64 without padding, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamppb.Timestamp.
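+//
+// A minimal usage sketch (surrounding double quotes in val, if any, are stripped
+// before parsing):
++//
+//	ts, err := Timestamp("2006-01-02T15:04:05Z")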
+func Timestamp(val string) (*timestamppb.Timestamp, error) {
+	var r timestamppb.Timestamp
+	val = strconv.Quote(strings.Trim(val, `"`))
+	unmarshaler := &protojson.UnmarshalOptions{}
+	if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Duration converts the given string into a durationpb.Duration.
+func Duration(val string) (*durationpb.Duration, error) {
+	var r durationpb.Duration
+	val = strconv.Quote(strings.Trim(val, `"`))
+	unmarshaler := &protojson.UnmarshalOptions{}
+	if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Enum converts the given string into an int32 that should be type cast into the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type cast into the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return nil, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Support for google.protobuf.wrappers on top of primitive types
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrapperspb.StringValue, error) {
+	return wrapperspb.String(val), nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrapperspb.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return wrapperspb.Float(parsedVal), err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return wrapperspb.Double(parsedVal), err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrapperspb.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return wrapperspb.Bool(parsedVal), err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrapperspb.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return wrapperspb.Int32(parsedVal), err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return wrapperspb.UInt32(parsedVal), err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrapperspb.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return wrapperspb.Int64(parsedVal), err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return wrapperspb.UInt64(parsedVal), err
+}
+
+// BytesValue well-known type support as wrapper around []byte type
+func BytesValue(val string) (*wrapperspb.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return wrapperspb.Bytes(parsedVal), err
+}
diff --git
a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go new file mode 100644 index 00000000..b6e5ddf7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by +servers which protoc-gen-grpc-gateway generates. +*/ +package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go new file mode 100644 index 00000000..01f57341 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -0,0 +1,191 @@ +package runtime + +import ( + "context" + "errors" + "io" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ErrorHandlerFunc is the signature used to configure error handling. +type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +// StreamErrorHandlerFunc is the signature used to configure stream error handling. +type StreamErrorHandlerFunc func(context.Context, error) *status.Status + +// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors. +type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int) + +// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error +// passed to the DefaultRoutingErrorHandler. +type HTTPStatusError struct { + HTTPStatus int + Err error +} + +func (e *HTTPStatusError) Error() string { + return e.Err.Error() +} + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return 499 + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + default: + grpclog.Warningf("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError + } +} + +// HTTPError uses the mux-configured error handler. +func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + mux.errorHandler(ctx, mux, marshaler, w, r, err) +} + +// DefaultHTTPErrorHandler is the default error handler. 
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. +// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is +// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler +// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode +// are insufficient for. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body written by this function is a Status message marshaled by the Marshaler. +func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}` + + var customStatus *HTTPStatusError + if errors.As(err, &customStatus) { + err = customStatus.Err + } + + s := status.Convert(err) + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + respRw, err := mux.forwardResponseRewriter(ctx, s.Proto()) + if err != nil { + grpclog.Errorf("Failed to rewrite error message %q: %v", s, err) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallbackRewriter); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + contentType := marshaler.ContentType(respRw) + w.Header().Set("Content-Type", contentType) + + if s.Code() == codes.Unauthenticated { + w.Header().Set("WWW-Authenticate", s.Message()) + } + + buf, merr := marshaler.Marshal(respRw) + if merr != nil { + grpclog.Errorf("Failed to marshal error message %q: %v", s, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(r) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + st := HTTPStatusFromCode(s.Code()) + if customStatus != nil { + st = customStatus.HTTPStatus + } + + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, mux, md) + } +} + +func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + return status.Convert(err) +} + +// DefaultRoutingErrorHandler is our default handler for routing errors. 
+// By default http error codes mapped on the following error codes: +// +// NotFound -> grpc.NotFound +// StatusBadRequest -> grpc.InvalidArgument +// MethodNotAllowed -> grpc.Unimplemented +// Other -> grpc.Internal, method is not expecting to be called for anything else +func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { + case http.StatusBadRequest: + sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus)) + case http.StatusMethodNotAllowed: + sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus)) + case http.StatusNotFound: + sterr = status.Error(codes.NotFound, http.StatusText(httpStatus)) + } + mux.errorHandler(ctx, mux, marshaler, w, r, sterr) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go new file mode 100644 index 00000000..9005d6a0 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -0,0 +1,168 @@ +package runtime + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "sort" + + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + field_mask "google.golang.org/protobuf/types/known/fieldmaskpb" +) + +func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { + fd := fields.ByName(protoreflect.Name(name)) + if fd != nil { + return fd + } + + return fields.ByJSONName(name) +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + + if err := json.NewDecoder(r).Decode(&root); err != nil { + if errors.Is(err, io.EOF) { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + m, ok := item.node.(map[string]interface{}) + switch { + case ok && len(m) > 0: + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { + return nil, errors.New("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) + if fd == nil { + return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName()) + } + + if isDynamicProtoMessage(fd.Message()) { + for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) { + newPath := p + if item.path != "" { + newPath = item.path + "." + newPath + } + queue = append(queue, fieldMaskPathItem{path: newPath}) + } + continue + } + + if isProtobufAnyMessage(fd.Message()) && !fd.IsList() { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) + continue + } else { + return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName()) + } + + } + + child := fieldMaskPathItem{ + node: v, + } + if item.path == "" { + child.path = string(fd.FullName().Name()) + } else { + child.path = item.path + "." 
+ string(fd.FullName().Name()) + } + + switch { + case fd.IsList(), fd.IsMap(): + // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86 + // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop. + fm.Paths = append(fm.Paths, child.path) + case fd.Message() != nil: + child.msg = item.msg.Get(fd).Message() + fallthrough + default: + queue = append(queue, child) + } + } + case ok && len(m) == 0: + fallthrough + case len(item.path) > 0: + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + // Sort for deterministic output in the presence + // of repeated fields. + sort.Strings(fm.Paths) + + return fm, nil +} + +func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Any") +} + +func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value") +} + +// buildPathsBlindly does not attempt to match proto field names to the +// json value keys. Instead it relies completely on the structure of +// the unmarshalled json contained within in. +// Returns a slice containing all subpaths with the root at the +// passed in name and json value. +func buildPathsBlindly(name string, in interface{}) []string { + m, ok := in.(map[string]interface{}) + if !ok { + return []string{name} + } + + var paths []string + queue := []fieldMaskPathItem{{path: name, node: m}} + for len(queue) > 0 { + cur := queue[0] + queue = queue[1:] + + m, ok := cur.node.(map[string]interface{}) + if !ok { + // This should never happen since we should always check that we only add + // nodes of type map[string]interface{} to the queue. + continue + } + for k, v := range m { + if mi, ok := v.(map[string]interface{}); ok { + queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi}) + } else { + // This is not a struct, so there are no more levels to descend. + curPath := cur.path + "." + k + paths = append(paths, curPath) + } + } + } + return paths +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // parent message + msg protoreflect.Message +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go new file mode 100644 index 00000000..9f50a569 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -0,0 +1,247 @@ +package runtime + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + "strconv" + "strings" + + "google.golang.org/genproto/googleapis/api/httpbody" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// ForwardResponseStream forwards the stream from gRPC server to REST client. 
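+//
+// A minimal usage sketch, as a generated gateway handler might call it
+// (assuming "stream" is the gRPC client stream for the method; names are
+// illustrative only):
+//
+//	ForwardResponseStream(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+//		return stream.Recv()
+//	})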
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + rc := http.NewResponseController(w) + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if errors.Is(err, io.EOF) { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(respRw)) + } + + var buf []byte + httpBody, isHTTPBody := respRw.(*httpbody.HttpBody) + switch { + case respRw == nil: + buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) + case isHTTPBody: + buf = httpBody.GetData() + default: + result := map[string]interface{}{"result": respRw} + if rb, ok := respRw.(responseBody); ok { + result["result"] = rb.XXX_ResponseBody() + } + + buf, err = marshaler.Marshal(result) + } + + if err != nil { + grpclog.Errorf("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } + err = rc.Flush() + if err != nil { + if errors.Is(err, http.ErrNotSupported) { + grpclog.Errorf("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + grpclog.Errorf("Failed to flush response to client: %v", err) + return + } + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h)) + } + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.TrailerMD { + if h, ok := mux.outgoingTrailerMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +// responseBody interface contains 
method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. +func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Error("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(req) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, mux, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + contentType := marshaler.ContentType(resp) + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + respRw, err := mux.forwardResponseRewriter(ctx, resp) + if err != nil { + grpclog.Errorf("Rewrite error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + if rb, ok := respRw.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(respRw) + } + if err != nil { + grpclog.Errorf("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if !doForwardTrailers { + w.Header().Set("Content-Length", strconv.Itoa(len(buf))) + } + + if _, err = w.Write(buf); err != nil { + grpclog.Errorf("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, mux, md) + } +} + +func requestAcceptsTrailers(req *http.Request) bool { + te := req.Header.Get("TE") + return strings.Contains(strings.ToLower(te), "trailers") +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + return fmt.Errorf("error handling ForwardResponseOptions: %w", err) + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) + } + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } + if _, err := w.Write(delimiter); err != nil { + grpclog.Errorf("Failed to send delimiter chunk: %v", err) + return + } +} + +func errorChunk(st *status.Status) map[string]proto.Message { + return 
map[string]proto.Message{"error": st.Proto()} +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go new file mode 100644 index 00000000..6de2e220 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go @@ -0,0 +1,32 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType returns its specified content type in case v is a +// google.api.HttpBody message, otherwise it will fall back to the default Marshalers +// content type. +func (h *HTTPBodyMarshaler) ContentType(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType(v) +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetData(), nil + } + return h.Marshaler.Marshal(v) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go new file mode 100644 index 00000000..fe52081a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -0,0 +1,50 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// MarshalIndent is like Marshal but applies Indent to format the output +func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return json.MarshalIndent(v, prefix, indent) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. 
+func (j *JSONBuiltin) Delimiter() []byte { + return []byte("\n") +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go new file mode 100644 index 00000000..8376d1e0 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -0,0 +1,349 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "google.golang.org/protobuf/encoding/protojson" marshaler. +// It supports the full functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb struct { + protojson.MarshalOptions + protojson.UnmarshalOptions +} + +// ContentType always returns "application/json". +func (*JSONPb) ContentType(_ interface{}) string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + if j.Indent != "" { + b := &bytes.Buffer{} + if err := json.Indent(b, buf, "", j.Indent); err != nil { + return err + } + buf = b.Bytes() + } + _, err = w.Write(buf) + return err + } + + b, err := j.MarshalOptions.Marshal(p) + if err != nil { + return err + } + + _, err = w.Write(b) + return err +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshal arbitrary data structures into JSON, +// it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. 
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitUnpopulated { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer + if err := buf.WriteByte('['); err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + if err := buf.WriteByte(','); err != nil { + return nil, err + } + } + var err error + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { + _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"") + } + if err != nil { + return nil, err + } + } + if err := buf.WriteByte(']'); err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, j.UnmarshalOptions, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{ + Decoder: d, + UnmarshalOptions: j.UnmarshalOptions, + } +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder + protojson.UnmarshalOptions +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
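+//
+// A minimal usage sketch (assuming an io.Writer "w" and a proto.Message "msg";
+// each Encode call writes the message followed by the delimiter, a newline):
+//
+//	m := &JSONPb{MarshalOptions: protojson.MarshalOptions{EmitUnpopulated: true}}
+//	if err := m.NewEncoder(w).Encode(msg); err != nil {
+//		// handle error
+//	}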
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, unmarshaler, v) +} + +func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, unmarshaler, v) + } + + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), p) +} + +func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + // Decode into bytes for marshalling + var b json.RawMessage + if err := d.Decode(&b); err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if v == nil { + null := json.RawMessage("null") + v = &null + } + if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if rv.Kind() == reflect.Slice { + if rv.Type().Elem().Kind() == reflect.Uint8 { + var sl []byte + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.SetBytes(sl) + } + return nil + } + + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.Set(reflect.MakeSlice(rv.Type(), 0, 0)) + } + for _, item := range sl { + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.Set(reflect.Append(rv, bv.Elem())) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch v := repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? 
+ return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go new file mode 100644 index 00000000..398c780d --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go @@ -0,0 +1,60 @@ +package runtime + +import ( + "errors" + "io" + + "google.golang.org/protobuf/proto" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". +func (*ProtoMarshaller) ContentType(_ interface{}) string { + return "application/octet-stream" +} + +// Marshal marshals "value" into Proto +func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { + message, ok := value.(proto.Message) + if !ok { + return nil, errors.New("unable to marshal non proto field") + } + return proto.Marshal(message) +} + +// Unmarshal unmarshals proto "data" into "value" +func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + message, ok := value.(proto.Message) + if !ok { + return errors.New("unable to unmarshal non proto field") + } + return proto.Unmarshal(data, message) +} + +// NewDecoder returns a Decoder which reads proto stream from "reader". +func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { + buffer, err := io.ReadAll(reader) + if err != nil { + return err + } + return marshaller.Unmarshal(buffer, value) + }) +} + +// NewEncoder returns an Encoder which writes proto stream into "writer". +func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + return EncoderFunc(func(value interface{}) error { + buffer, err := marshaller.Marshal(value) + if err != nil { + return err + } + if _, err := writer.Write(buffer); err != nil { + return err + } + + return nil + }) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go new file mode 100644 index 00000000..2c0d25ff --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -0,0 +1,50 @@ +package runtime + +import ( + "io" +) + +// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. +type Marshaler interface { + // Marshal marshals "v" into byte sequence. 
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads a byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes a byte sequence into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	// The parameter describes the type which is being marshalled, which can sometimes
+	// affect the content type returned.
+	ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence.
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into a byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 00000000..0b051e6e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+	"errors"
+	"mime"
+	"net/http"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{
+			MarshalOptions: protojson.MarshalOptions{
+				EmitUnpopulated: true,
+			},
+			UnmarshalOptions: protojson.UnmarshalOptions{
+				DiscardUnknown: true,
+			},
+		},
+	}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it checks for "*".
+// If there are multiple Content-Type headers set, it chooses the first one that it can
+// exactly match in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
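+//
+// A minimal usage sketch (assuming a *ServeMux "mux" and an *http.Request "req"):
+//
+//	inbound, outbound := MarshalerForRequest(mux, req)
+//	// decode the request body with "inbound"; marshal the response with "outbound"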
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+ for _, acceptVal := range r.Header[acceptHeader] {
+ if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+ outbound = m
+ break
+ }
+ }
+
+ for _, contentTypeVal := range r.Header[contentTypeHeader] {
+ contentType, _, err := mime.ParseMediaType(contentTypeVal)
+ if err != nil {
+ grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+ continue
+ }
+ if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+ inbound = m
+ break
+ }
+ }
+
+ if inbound == nil {
+ inbound = mux.marshalers.mimeMap[MIMEWildcard]
+ }
+ if outbound == nil {
+ outbound = inbound
+ }
+
+ return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+ mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+ if len(mime) == 0 {
+ return errors.New("empty MIME type")
+ }
+
+ m.mimeMap[mime] = marshaler
+
+ return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+ return marshalerRegistry{
+ mimeMap: map[string]Marshaler{
+ MIMEWildcard: defaultMarshaler,
+ },
+ }
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+ return func(mux *ServeMux) {
+ if err := mux.marshalers.add(mime, marshaler); err != nil {
+ panic(err)
+ }
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 00000000..60c2065d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,537 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/textproto"
+ "regexp"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+ // UnescapingModeLegacy is the default V2 behavior, which unescapes the entire
+ // path string before doing any routing.
+ UnescapingModeLegacy UnescapingMode = iota
+
+ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+ // reserved characters.
+ UnescapingModeAllExceptReserved
+
+ // UnescapingModeAllExceptSlash unescapes URL path parameters except path
+ // separators, which will be left as "%2F".
+ UnescapingModeAllExceptSlash
+
+ // UnescapingModeAllCharacters unescapes all URL path parameters.
+ UnescapingModeAllCharacters
+
+ // UnescapingModeDefault is the default escaping type.
+ // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
+ // reference implementation.
+ UnescapingModeDefault = UnescapingModeLegacy
+)
+
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// A Middleware handler wraps another HandlerFunc to do some pre- and/or post-processing
+// of the request. This is used as an alternative to gRPC interceptors when using the
+// direct-to-implementation registration methods. It is generally recommended to use
+// gRPC client or server interceptors instead where possible.
+type Middleware func(HandlerFunc) HandlerFunc
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+ // handlers maps HTTP method to a list of handlers.
+ handlers map[string][]handler
+ middlewares []Middleware
+ forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+ forwardResponseRewriter ForwardResponseRewriter
+ marshalers marshalerRegistry
+ incomingHeaderMatcher HeaderMatcherFunc
+ outgoingHeaderMatcher HeaderMatcherFunc
+ outgoingTrailerMatcher HeaderMatcherFunc
+ metadataAnnotators []func(context.Context, *http.Request) metadata.MD
+ errorHandler ErrorHandlerFunc
+ streamErrorHandler StreamErrorHandlerFunc
+ routingErrorHandler RoutingErrorHandlerFunc
+ disablePathLengthFallback bool
+ unescapingMode UnescapingMode
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages
+// before they are forwarded in a unary, stream, or error response.
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error)
+
+// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic
+// that can rewrite the final response before it is forwarded.
+//
+// The response rewriter function is called during unary message forwarding, stream message
+// forwarding and when errors are being forwarded.
+//
+// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2`
+// incorrect, since this option involves making runtime changes to the response shape or type.
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption {
+ return func(sm *ServeMux) {
+ sm.forwardResponseRewriter = fwdResponseRewriter
+ }
+}
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+ }
+}
+
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
+// for more information.
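+//
+// A minimal sketch (the mode chosen here is only illustrative):
+//
+//    mux := runtime.NewServeMux(
+//        runtime.WithUnescapingMode(runtime.UnescapingModeAllExceptReserved),
+//    )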
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.unescapingMode = mode + } +} + +// WithMiddlewares sets server middleware for all handlers. This is useful as an alternative to gRPC +// interceptors when using the direct-to-implementation registration methods and cannot rely +// on gRPC interceptors. It's recommended to use gRPC interceptors instead if possible. +func WithMiddlewares(middlewares ...Middleware) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.middlewares = append(serveMux.middlewares, middlewares...) + } +} + +// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. +// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be +// done with careful consideration. +func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { + return func(serveMux *ServeMux) { + currentQueryParser = queryParameterParser + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA, e.g: Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix. If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function. +// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'. +// Other headers are not added to the gRPC metadata. +func DefaultHeaderMatcher(key string) (string, bool) { + switch key = textproto.CanonicalMIMEHeaderKey(key); { + case isPermanentHTTPHeader(key): + return MetadataPrefix + key, true + case strings.HasPrefix(key, MetadataHeaderPrefix): + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +func defaultOutgoingHeaderMatcher(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true +} + +func defaultOutgoingTrailerMatcher(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + for _, header := range fn.matchedMalformedHeaders() { + grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header) + } + + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server. 
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+ if fn == nil {
+ return nil
+ }
+ headers := make([]string, 0)
+ for header := range malformedHTTPHeaders {
+ out, accept := fn(header)
+ if accept && isMalformedHTTPHeader(out) {
+ headers = append(headers, out)
+ }
+ }
+ return headers
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to the http response returned from the gateway. To transform the header before passing it to the response,
+// matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingHeaderMatcher = fn
+ }
+}
+
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
+// passed to the http response returned from the gateway. To transform the header before passing it to the response,
+// matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingTrailerMatcher = fn
+ }
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+ }
+}
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.errorHandler = fn
+ }
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.streamErrorHandler = fn
+ }
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
+//
+// The handler is called for errors which can happen before a gRPC route has been selected or executed,
+// i.e. for the following HTTP status codes: StatusMethodNotAllowed, StatusNotFound and StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.routingErrorHandler = fn
+ }
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
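+//
+// Without this option, a POST whose Content-Type is
+// "application/x-www-form-urlencoded" may be rerouted to a matching GET handler
+// and may honor the X-HTTP-Method-Override header (see isPathLengthFallback and
+// ServeHTTP below); a sketch:
+//
+//    mux := runtime.NewServeMux(runtime.WithDisablePathLengthFallback())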
+func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. +// When called the handler will forward the request to the upstream grpc service health check (defined in the +// gRPC Health Checking Protocol). +// +// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how +// to setup the protocol in the grpc server. +// +// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest. +func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption { + return func(s *ServeMux) { + // error can be ignored since pattern is definitely valid + _ = s.HandlePath( + http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string, + ) { + _, outboundMarshaler := MarshalerForRequest(s, r) + + resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{ + Service: r.URL.Query().Get("service"), + }) + if err != nil { + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + w.Header().Set("Content-Type", "application/json") + + if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING { + switch resp.GetStatus() { + case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN: + err = status.Error(codes.Unavailable, resp.String()) + case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN: + err = status.Error(codes.NotFound, resp.String()) + } + + s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err) + return + } + + _ = outboundMarshaler.NewEncoder(w).Encode(resp) + }) + } +} + +// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux. +// +// See WithHealthEndpointAt for the general implementation. +func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption { + return WithHealthEndpointAt(healthCheckClient, "/healthz") +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil }, + marshalers: makeMarshalerMIMERegistry(), + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher + } + if serveMux.outgoingTrailerMatcher == nil { + serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. 
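+//
+// Generated gateway code registers its routes through Handle; custom routes are
+// usually added with HandlePath, which parses a template and calls Handle. A
+// sketch with a hypothetical path:
+//
+//    err := mux.HandlePath(http.MethodGet, "/v1/custom/ping", func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//        w.WriteHeader(http.StatusOK)
+//    })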
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if len(s.middlewares) > 0 { + h = chainMiddlewares(s.middlewares)(h) + } + s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) +} + +// HandlePath allows users to configure custom path handlers. +// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/ +func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error { + compiler, err := httprule.Parse(pathPattern) + if err != nil { + return fmt.Errorf("parsing path pattern: %w", err) + } + tp := compiler.Compile() + pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb) + if err != nil { + return fmt.Errorf("creating new pattern: %w", err) + } + s.Handle(meth, pattern, h) + return nil +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path. +func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest) + return + } + + // TODO(v3): remove UnescapingModeLegacy + if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" { + path = r.URL.RawPath + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) + return + } + r.Method = strings.ToUpper(override) + } + + var pathComponents []string + // since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F" + // in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the + // path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default + // behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved + if s.unescapingMode == UnescapingModeAllCharacters { + pathComponents = encodedPathSplitter.Split(path[1:], -1) + } else { + pathComponents = strings.Split(path[1:], "/") + } + + lastPathComponent := pathComponents[len(pathComponents)-1] + + for _, h := range s.handlers[r.Method] { + // If the pattern has a verb, explicitly look for a suffix in the last + // component that matches a colon plus the verb. This allows us to + // handle some cases that otherwise can't be correctly handled by the + // former LastIndex case, such as when the verb literal itself contains + // a colon. This should work for all cases that have run through the + // parser because we know what verb we're looking for, however, there + // are still some cases that the parser itself cannot disambiguate. See + // the comment there if interested. 
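+ // For example, with a pattern verb "checkout", a last component
+ // "book1:checkout" yields idx > 0 and is split into the component
+ // "book1" and the verb "checkout"; idx == 0 would mean an empty
+ // component before the colon, which is treated as not found.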
+
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+ if idx == 0 {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+ return
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+
+ // if no handler has been found for the request, look up other methods
+ // to handle POST -> GET fallback if the request is subject to path
+ // length fallback.
+ // Note we are not eagerly checking the request here as we want to return the
+ // right HTTP status code, and we need to process the fallback candidates in
+ // order to do that.
+ for m, handlers := range s.handlers {
+ if m == r.Method {
+ continue
+ }
+ for _, h := range handlers {
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+
+ // X-HTTP-Method-Override is optional. Always allow fallback to POST.
+ // Also, only consider POST -> GET fallbacks, and avoid falling back to
+ // potentially dangerous operations like DELETE.
+ if s.isPathLengthFallback(r) && m == http.MethodGet {
+ if err := r.ParseForm(); err != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ return
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
+ return
+ }
+ }
+
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
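+//
+// These are the functions registered through WithForwardResponseOption, in
+// registration order; generated handlers typically run them before writing
+// each response.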
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+ return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+ return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+ pat Pattern
+ h HandlerFunc
+}
+
+func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+ h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams)
+}
+
+func chainMiddlewares(mws []Middleware) Middleware {
+ return func(next HandlerFunc) HandlerFunc {
+ for i := len(mws); i > 0; i-- {
+ next = mws[i-1](next)
+ }
+ return next
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
new file mode 100644
index 00000000..e5450714
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -0,0 +1,381 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+)
+
+var (
+ // ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
+ ErrNotMatch = errors.New("not match to the path pattern")
+ // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+ ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type MalformedSequenceError string
+
+func (e MalformedSequenceError) Error() string {
+ return "malformed path escape " + strconv.Quote(string(e))
+}
+
+type op struct {
+ code utilities.OpCode
+ operand int
+}
+
+// Pattern is a template pattern of http request paths defined in
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
+type Pattern struct {
+ // ops is a list of operations
+ ops []op
+ // pool is a constant pool indexed by the operands or vars.
+ pool []string
+ // vars is a list of variable names to be bound by this pattern
+ vars []string
+ // stacksize is the max depth of the stack
+ stacksize int
+ // tailLen is the length of the fixed-size segments after a deep wildcard
+ tailLen int
+ // verb is the VERB part of the path pattern. It is empty if the pattern does not have a VERB part.
+ verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
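+//
+// Patterns are normally emitted by protoc-gen-grpc-gateway or compiled from a
+// template by HandlePath; a hand-assembled sketch for "/v1/users/{id}" (opcode
+// values come from the utilities package):
+//
+//    pat, err := runtime.NewPattern(1,
+//        []int{
+//            int(utilities.OpLitPush), 0, // match the literal "v1"
+//            int(utilities.OpLitPush), 1, // match the literal "users"
+//            int(utilities.OpPush), 0, // accept any single component
+//            int(utilities.OpConcatN), 1, // join the top 1 stack entry
+//            int(utilities.OpCapture), 2, // bind it to pool[2], "id"
+//        },
+//        []string{"v1", "users", "id"}, "")
+//    // pat.MatchAndEscape([]string{"v1", "users", "42"}, "", runtime.UnescapingModeDefault)
+//    // then yields map[string]string{"id": "42"}.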
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { + if version != 1 { + grpclog.Errorf("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Errorf("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Error("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Errorf("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Errorf("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Error("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Errorf("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Error("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Errorf("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// MatchAndEscape examines components to determine if they match to a Pattern. +// MatchAndEscape will return an error if no Patterns matched or if a pattern +// matched but contained malformed escape sequences. If successful, the function +// returns a mapping from field paths to their captured values. +func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) { + if p.verb != verb { + if p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
+ components[len(components)-1] += ":" + verb
+ }
+ }
+
+ var pos int
+ stack := make([]string, 0, p.stacksize)
+ captured := make([]string, len(p.vars))
+ l := len(components)
+ for _, op := range p.ops {
+ var err error
+
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush, utilities.OpLitPush:
+ if pos >= l {
+ return nil, ErrNotMatch
+ }
+ c := components[pos]
+ if op.code == utilities.OpLitPush {
+ if lit := p.pool[op.operand]; c != lit {
+ return nil, ErrNotMatch
+ }
+ } else if op.code == utilities.OpPush {
+ if c, err = unescape(c, unescapingMode, false); err != nil {
+ return nil, err
+ }
+ }
+ stack = append(stack, c)
+ pos++
+ case utilities.OpPushM:
+ end := len(components)
+ if end < pos+p.tailLen {
+ return nil, ErrNotMatch
+ }
+ end -= p.tailLen
+ c := strings.Join(components[pos:end], "/")
+ if c, err = unescape(c, unescapingMode, true); err != nil {
+ return nil, err
+ }
+ stack = append(stack, c)
+ pos = end
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ captured[op.operand] = stack[n]
+ stack = stack[:n]
+ }
+ }
+ if pos < l {
+ return nil, ErrNotMatch
+ }
+ bindings := make(map[string]string)
+ for i, val := range captured {
+ bindings[p.vars[i]] = val
+ }
+ return bindings, nil
+}
+
+// Match examines components to determine if they match to a Pattern.
+// It will never perform per-component unescaping (see: UnescapingModeLegacy).
+// Match will return an error if no Patterns matched. If successful,
+// the function returns a mapping from field paths to their captured values.
+//
+// Deprecated: Use MatchAndEscape.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+ return p.MatchAndEscape(components, verb, UnescapingModeDefault)
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+ var stack []string
+ for _, op := range p.ops {
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ stack = append(stack, "*")
+ case utilities.OpLitPush:
+ stack = append(stack, p.pool[op.operand])
+ case utilities.OpPushM:
+ stack = append(stack, "**")
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+ }
+ }
+ segs := strings.Join(stack, "/")
+ if p.verb != "" {
+ return fmt.Sprintf("/%s:%s", segs, p.verb)
+ }
+ return "/" + segs
+}
+
+/*
+ * The following code is adapted and modified from Go's standard library
+ * and carries the attached license.
+ *
+ * Copyright 2009 The Go Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file.
+ */
+
+// ishex returns whether or not the given byte is a valid hex character.
+func ishex(c byte) bool {
+ switch {
+ case '0' <= c && c <= '9':
+ return true
+ case 'a' <= c && c <= 'f':
+ return true
+ case 'A' <= c && c <= 'F':
+ return true
+ }
+ return false
+}
+
+func isRFC6570Reserved(c byte) bool {
+ switch c {
+ case '!', '#', '$', '&', '\'', '(', ')', '*',
+ '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+// unhex converts a hex digit to its numeric value.
+func unhex(c byte) byte {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0'
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+ }
+ return 0
+}
+
+// shouldUnescapeWithMode reports whether the character should be unescaped
+// under the given mode.
+func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
+ switch mode {
+ case UnescapingModeAllExceptReserved:
+ if isRFC6570Reserved(c) {
+ return false
+ }
+ case UnescapingModeAllExceptSlash:
+ if c == '/' {
+ return false
+ }
+ case UnescapingModeAllCharacters:
+ return true
+ }
+ return true
+}
+
+// unescape unescapes a path string using the provided mode.
+func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
+ // TODO(v3): remove UnescapingModeLegacy
+ if mode == UnescapingModeLegacy {
+ return s, nil
+ }
+
+ if !multisegment {
+ mode = UnescapingModeAllCharacters
+ }
+
+ // Count %, check that they're well-formed.
+ n := 0
+ for i := 0; i < len(s); {
+ if s[i] == '%' {
+ n++
+ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+ s = s[i:]
+ if len(s) > 3 {
+ s = s[:3]
+ }
+
+ return "", MalformedSequenceError(s)
+ }
+ i += 3
+ } else {
+ i++
+ }
+ }
+
+ if n == 0 {
+ return s, nil
+ }
+
+ var t strings.Builder
+ t.Grow(len(s))
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '%':
+ c := unhex(s[i+1])<<4 | unhex(s[i+2])
+ if shouldUnescapeWithMode(c, mode) {
+ t.WriteByte(c)
+ i += 2
+ continue
+ }
+ fallthrough
+ default:
+ t.WriteByte(s[i])
+ }
+ }
+
+ return t.String(), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
new file mode 100644
index 00000000..d549407f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+ "google.golang.org/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+ return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+ b, err := Bool(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+ f, err := Float64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
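+//
+// For example, Float32P("1.5") returns a *float32 pointing at 1.5 and a nil
+// error; helpers like these are used when binding proto2 optional scalar
+// fields from path and query parameters.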
+func Float32P(val string) (*float32, error) {
+ f, err := Float32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+ i, err := Int64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+ i, err := Int32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int32(i), nil
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+ i, err := Uint64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint64(i), nil
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+ i, err := Uint32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint32(i), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
new file mode 100644
index 00000000..fe634174
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -0,0 +1,372 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/known/durationpb"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+ "google.golang.org/protobuf/types/known/structpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
+
+// QueryParameterParser defines the interface for all query parameter parsers.
+type QueryParameterParser interface {
+ Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
+}
+
+// PopulateQueryParameters parses query parameters
+// into "msg" using the current query parser.
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ return currentQueryParser.Parse(msg, values, filter)
+}
+
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameters parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
+
+// Parse populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ for key, values := range values {
+ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
+ key = match[1]
+ values = append([]string{match[2]}, values...)
+ } + + msgValue := msg.ProtoReflect() + fieldPath := normalizeFieldPath(msgValue, strings.Split(key, ".")) + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) +} + +func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string { + newFieldPath := make([]string, 0, len(fieldPath)) + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + fieldDesc := fields.ByTextName(fieldName) + if fieldDesc == nil { + fieldDesc = fields.ByJSONName(fieldName) + } + if fieldDesc == nil { + // return initial field path values if no matching message field was found + return fieldPath + } + + newFieldPath = append(newFieldPath, string(fieldDesc.Name())) + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated { + return fieldPath + } + + // Get the nested message + msgValue = msgValue.Get(fieldDesc).Message() + } + + return newFieldPath +} + +func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { + if len(fieldPath) < 1 { + return errors.New("no field path") + } + if len(values) < 1 { + return errors.New("no value provided") + } + + var fieldDescriptor protoreflect.FieldDescriptor + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + + // Get field by name + fieldDescriptor = fields.ByName(protoreflect.Name(fieldName)) + if fieldDescriptor == nil { + fieldDescriptor = fields.ByJSONName(fieldName) + if fieldDescriptor == nil { + // We're not returning an error here because this could just be + // an extra query parameter that isn't part of the request. 
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, ".")) + return nil + } + } + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated { + return fmt.Errorf("invalid path: %q is not a message", fieldName) + } + + // Get the nested message + msgValue = msgValue.Mutable(fieldDescriptor).Message() + } + + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil { + if f := msgValue.WhichOneof(of); f != nil { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + + switch { + case fieldDescriptor.IsList(): + return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) + case fieldDescriptor.IsMap(): + return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values) + } + + if len(values) > 1 { + return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", ")) + } + + return populateField(fieldDescriptor, msgValue, values[0]) +} + +func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err) + } + + msgValue.Set(fieldDescriptor, v) + return nil +} + +func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error { + for _, value := range values { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err) + } + list.Append(v) + } + + return nil +} + +func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName()) + } + + key, err := parseField(fieldDescriptor.MapKey(), values[0]) + if err != nil { + return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err) + } + + value, err := parseField(fieldDescriptor.MapValue(), values[1]) + if err != nil { + return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err) + } + + mp.Set(key.MapKey(), value) + + return nil +} + +func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) { + switch fieldDescriptor.Kind() { + case protoreflect.BoolKind: + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) + if err != nil { + if errors.Is(err, protoregistry.NotFound) { + return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) + } + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name + v := enum.Descriptor().Values().ByName(protoreflect.Name(value)) + if v == nil { + i, err := strconv.Atoi(value) + if err != nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number + if v = 
enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } + return protoreflect.ValueOfEnum(v.Number()), nil + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt32(int32(v)), nil + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt64(v), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint32(uint32(v)), nil + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint64(v), nil + case protoreflect.FloatKind: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat32(float32(v)), nil + case protoreflect.DoubleKind: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat64(v), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBytes(v), nil + case protoreflect.MessageKind, protoreflect.GroupKind: + return parseMessage(fieldDescriptor.Message(), value) + default: + panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind())) + } +} + +func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) { + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg = timestamppb.New(t) + case "google.protobuf.Duration": + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = durationpb.New(d) + case "google.protobuf.DoubleValue": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Double(v) + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Float(float32(v)) + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int64(v) + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Int32(int32(v)) + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt64(v) + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.UInt32(uint32(v)) + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = 
wrapperspb.Bool(v) + case "google.protobuf.StringValue": + msg = wrapperspb.String(value) + case "google.protobuf.BytesValue": + v, err := Bytes(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = wrapperspb.Bytes(v) + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) + msg = fm + case "google.protobuf.Value": + var v structpb.Value + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + case "google.protobuf.Struct": + var v structpb.Struct + if err := protojson.Unmarshal([]byte(value), &v); err != nil { + return protoreflect.Value{}, err + } + msg = &v + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } + + return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel new file mode 100644 index 00000000..b8940946 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "utilities", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "string_array_flag.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", +) + +go_test( + name = "utilities_test", + size = "small", + srcs = [ + "string_array_flag_test.go", + "trie_test.go", + ], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go new file mode 100644 index 00000000..cf79a4d5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. +package utilities diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go new file mode 100644 index 00000000..dfe7de48 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -0,0 +1,22 @@ +package utilities + +// An OpCode is a opcode of compiled path patterns. +type OpCode int + +// These constants are the valid values of OpCode. +const ( + // OpNop does nothing + OpNop = OpCode(iota) + // OpPush pushes a component to stack + OpPush + // OpLitPush pushes a component to stack if it matches to the literal + OpLitPush + // OpPushM concatenates the remaining components and pushes it to stack + OpPushM + // OpConcatN pops N items from stack, concatenates them and pushes it back to stack + OpConcatN + // OpCapture pops an item and binds it to the variable + OpCapture + // OpEnd is the least positive invalid opcode. 
+ OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
new file mode 100644
index 00000000..01d26eda
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -0,0 +1,19 @@
+package utilities
+
+import (
+ "bytes"
+ "io"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return func() io.Reader {
+ return bytes.NewReader(b)
+ }, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 00000000..d224ab77
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+ "flag"
+ "strings"
+)
+
+// flagInterface is a cut-down interface to `flag`.
+type flagInterface interface {
+ Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+ value := &StringArrayFlags{}
+ f.Var(value, name, usage)
+ return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var`.
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`.
+func (i *StringArrayFlags) String() string {
+ return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`.
+func (i *StringArrayFlags) Set(value string) error {
+ *i = append(*i, value)
+ return nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
new file mode 100644
index 00000000..dd99b0ed
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -0,0 +1,174 @@
+package utilities
+
+import (
+ "sort"
+)
+
+// DoubleArray is a Double-Array trie implementation over sequences of strings.
+type DoubleArray struct {
+ // Encoding keeps an encoding from string to int
+ Encoding map[string]int
+ // Base is the base array of Double Array
+ Base []int
+ // Check is the check array of Double Array
+ Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
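+//
+// For example, with the stored sequences {"user", "name"} and {"id"}:
+//
+//    da := utilities.NewDoubleArray([][]string{{"user", "name"}, {"id"}})
+//    da.HasCommonPrefix([]string{"user", "name", "first"}) // true: {"user", "name"} is a prefix
+//    da.HasCommonPrefix([]string{"user"})                  // false: no stored sequence is a prefix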
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + encoded := make([]int, 0, len(seq)) + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + return k < len(sj) +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. +func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 4c28dff4..4528059c 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,9 +1,8 @@ -# This is an example goreleaser.yaml file with some sane defaults. 
-# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh - - go install mvdan.cc/garble@v0.10.1 builds: - @@ -32,7 +31,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2d" binary: s2d @@ -59,7 +57,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble - id: "s2sx" binary: s2sx @@ -87,7 +84,6 @@ builds: - mips64le goarm: - 7 - gobinary: garble archives: - @@ -103,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7e83f583..de264c85 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,51 @@ This package provides various compression algorithms. # changelog +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + +* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) + * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 + * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 + +* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5) + * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912 + * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908 + * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913 + * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910 + * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille 
in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 +https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918 + +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + * Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 @@ -31,6 +76,10 @@ This package provides various compression algorithms. * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839 * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837 * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860 + +
+ See changes to v1.16.x + * July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 @@ -53,7 +102,7 @@ This package provides various compression algorithms. * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -69,6 +118,7 @@ This package provides various compression algorithms. * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746 +
See changes to v1.15.x @@ -107,7 +157,7 @@ This package provides various compression algorithms. * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -310,7 +360,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -489,7 +539,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. @@ -536,6 +586,8 @@ the stateless compress described below. For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). +To disable all assembly add `-tags=noasm`. This works across all packages. + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. @@ -554,7 +606,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. S A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: -``` +```go // replace 'ioutil.Discard' with your output. gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) if err != nil { diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7..0c7dd4ff 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. 
-// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b2..0f56b02d 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 2aa6a95a..2754bac6 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -51,7 +51,7 @@ func emitCopy(dst []byte, offset, length int) int { i := 0 // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because + // length emitted down below is a little lower (at 60 = 64 - 4), because // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 2263853f..5a4412f9 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,4 @@ module github.com/klauspost/compress -go 1.16 +go 1.19 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9f17ce60..9c28840c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -554,6 +554,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if debugDecoder { printf("Compression modes: 0b%b", compMode) } + if compMode&3 != 0 { + return errors.New("corrupt block: reserved bits not zero") + } for i := uint(0); i < 3; i++ { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { @@ -595,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 2cfe925a..32a7f401 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -427,6 +427,16 @@ func (b *blockEnc) 
encodeLits(lits []byte, raw bool) error { return nil } +// encodeRLE will encode an RLE block. +func (b *blockEnc) encodeRLE(val byte, length uint32) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(length) + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, val) +} + // fuzzFseEncoder can be used to fuzz the FSE encoder. func fuzzFseEncoder(data []byte) int { if len(data) > maxSequences || len(data) < 2 { @@ -479,6 +489,16 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { if len(b.sequences) == 0 { return b.encodeLits(b.literals, rawAllLits) } + if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { + // Check common RLE cases. + seq := b.sequences[0] + if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { + // Offset == 1 and 0 or 1 literals. + b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) + return nil + } + } + // We want some difference to at least account for the headers. saved := b.size - len(b.literals) - (b.size >> 6) if saved < 16 { diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index f6a24097..6a5a2988 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -95,42 +95,54 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. // The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
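+//
+// A minimal usage sketch, assuming frameBytes is a hypothetical buffer
+// holding the start of a zstd stream (illustrative only):
+//
+//	var h Header
+//	remain, err := h.DecodeAndStrip(frameBytes)
+//	if err == nil && h.FirstBlock.OK {
+//		// The frame header has been consumed; remain begins at the first block.
+//	}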
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { *h = Header{} if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 b, in := in[:4], in[4:] if string(b) != frameMagic { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch + return nil, ErrMagicMismatch } if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 h.Skippable = true h.SkippableID = int(b[0] & 0xf) h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil + return in[4:], nil } // Read Window_Descriptor // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } fhd, in := in[0], in[1:] h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") + return nil, errors.New("reserved bit set on frame header") } if !h.SingleSegment { if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] @@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error { size = 4 } if len(in) < int(size) { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:size], in[size:] h.HeaderSize += int(size) @@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error { if fcsSize > 0 { h.HasFCS = true if len(in) < fcsSize { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) @@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error { // Frame Header done, we will not fail from now on. if len(in) < 3 { - return nil + return in, nil } tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) @@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error { cSize := int(bh >> 3) switch blockType { case blockTypeReserved: - return nil + return in, nil case blockTypeRLE: h.FirstBlock.Compressed = true h.FirstBlock.DecompressedSize = cSize @@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error { } h.FirstBlock.OK = true - return nil + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) + f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f04aaa21..bbca1723 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -82,7 +82,7 @@ var ( // can run multiple concurrent stateless decodes. It is even possible to // use stateless decodes while a stream is being decoded. 
// -// The Reset function can be used to initiate a new stream, which is will considerably +// The Reset function can be used to initiate a new stream, which will considerably // reduce the allocations normally caused by NewReader. func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { initPredefined() diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index 8d5567fe..b7b83164 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -273,6 +273,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { enc.Encode(&block, b) addValues(&remain, block.literals) litTotal += len(block.literals) + if len(block.sequences) == 0 { + continue + } seqs += len(block.sequences) block.genCodes() addHist(&ll, block.coders.llEnc.Histogram()) @@ -286,6 +289,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if offset == 0 { continue } + if int(offset) >= len(o.History) { + continue + } if offset > 3 { newOffsets[offset-3]++ } else { @@ -336,6 +342,9 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { if seqs/nUsed < 512 { // Use 512 as minimum. nUsed = seqs / 512 + if nUsed == 0 { + nUsed = 1 + } } copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { hist := dst.Histogram() @@ -358,6 +367,28 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { fakeLength += v hist[i] = uint32(v) } + + // Ensure we aren't trying to represent RLE. + if maxCount == fakeLength { + for i := range hist { + if uint8(i) == maxSym { + fakeLength++ + maxSym++ + hist[i+1] = 1 + if maxSym > 1 { + break + } + } + if hist[0] == 0 { + fakeLength++ + hist[i] = 1 + if maxSym > 1 { + break + } + } + } + } + dst.HistogramFinished(maxSym, maxCount) dst.reUsed = false dst.useRLE = false diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index c81a1535..4613724e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -135,8 +135,20 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { break } + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] @@ -201,14 +213,6 @@ encodeLoop: if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { return } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { - panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } // Try to quick reject if we already have a long match. if m.length > 16 { left := len(src) - int(m.s+m.length) @@ -227,8 +231,10 @@ encodeLoop: } } l := 4 + e.matchlen(s+4, offset+4, src) - if true { + if m.rep <= 0 { // Extend candidate match backwards as far as possible. + // Do not extend repeats as we can assume they are optimal + // and offsets change if s == nextEmit. 
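+			// The loop below extends the candidate backwards in lockstep
+			// (s--, offset--) while the preceding bytes match, bounded by
+			// tMin and nextEmit, growing the match length l.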
tMin := s - e.maxMatchOff if tMin < 0 { tMin = 0 @@ -239,7 +245,14 @@ encodeLoop: l++ } } - + if debugAsserts { + if offset >= s { + panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) + } + if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { + panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } cand := match{offset: offset, s: s, length: l, rep: rep} cand.estBits(bitsPerByte) if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { @@ -336,24 +349,31 @@ encodeLoop: } if debugAsserts { + if best.offset >= best.s { + panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) + } + if best.s < nextEmit { + panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) + } + if best.offset < s-e.maxMatchOff { + panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) + } if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) } } // We have a match, we can store the forward value + s = best.s if best.rep > 0 { var seq seq seq.matchLen = uint32(best.length - zstdMinMatch) - if debugAsserts && s < nextEmit { - panic("s < nextEmit") - } addLiterals(&seq, best.s) // Repeat. If bit 4 is set, this is a non-lit repeat. seq.offset = uint32(best.rep & 3) if debugSequences { - println("repeat sequence", seq, "next s:", s) + println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) } blk.sequences = append(blk.sequences, seq) @@ -396,7 +416,6 @@ encodeLoop: // A 4-byte match has been found. Update recent offsets. // We'll later see if more than 4 bytes. - s = best.s t := best.offset offset1, offset2, offset3 = s-t, offset1, offset2 diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 20d25b0e..84a79fde 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -102,9 +102,20 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { e.cur = e.maxMatchOff break } - + // Add block to history s := e.addBlock(src) blk.size = len(src) + + // Check RLE first + if len(src) > zstdMinMatch { + ml := matchLen(src[1:], src) + if ml == len(src)-1 { + blk.literals = append(blk.literals, src[0]) + blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) + return + } + } + if len(src) < minNonLiteralBlockSize { blk.extraLits = len(src) blk.literals = blk.literals[:len(src)] @@ -168,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
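
The "Check RLE first" additions above (in both enc_best.go and enc_better.go) rely on a compact idiom: comparing a buffer against itself shifted by one byte matches on len(src)-1 bytes exactly when every byte is identical. A self-contained sketch of the idea, using a plain-Go matchLen stand-in rather than the package's internal, assembly-backed helper:

```go
package main

import "fmt"

// matchLen is a stand-in for the package's internal matchLen:
// it counts the leading bytes on which a and b agree.
func matchLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	src := []byte("aaaaaa")
	// For a run of one repeated byte, src[1:] matches src on len(src)-1 bytes.
	fmt.Println(matchLen(src[1:], src) == len(src)-1) // true: eligible for an RLE block

	src = []byte("aaabaa")
	fmt.Println(matchLen(src[1:], src) == len(src)-1) // false: normal encoding path
}
```
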
@@ -199,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -230,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -259,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -697,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -727,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -761,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -790,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f..d36be7bd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. 
var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0..8f8223cd 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. 
-		// If a non-single block is needed the encoder will reset again.
-		e.encoders <- enc
-	}()
+
 	// Use single segments when above minimum window and below window size.
 	single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
 	if e.o.single != nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index faaf8192..20671dcb 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption {
 // The value must be a power of two between MinWindowSize and MaxWindowSize.
 // A larger value will enable better compression but allocate more memory and,
 // for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level, with a maximum of 8MB.
 func WithWindowSize(n int) EOption {
 	return func(o *encoderOptions) error {
 		switch {
@@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption {
 			case SpeedDefault:
 				o.windowSize = 8 << 20
 			case SpeedBetterCompression:
-				o.windowSize = 16 << 20
+				o.windowSize = 8 << 20
 			case SpeedBestCompression:
-				o.windowSize = 32 << 20
+				o.windowSize = 8 << 20
 			}
 		}
 		if !o.customALEntropy {
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 53e160f7..e47af66e 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error {
 		}
 		return err
 	}
-	printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+	if debugDecoder {
+		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+	}
 	windowLog := 10 + (wd >> 3)
 	windowBase := uint64(1) << windowLog
 	windowAdd := (windowBase / 8) * uint64(wd&0x7)
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 2f5d5ed4..667ca067 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte {
 	if f.SingleSegment {
 		dst = append(dst, uint8(f.ContentSize))
 	}
-	// Unless SingleSegment is set, framessizes < 256 are nto stored.
+	// Unless SingleSegment is set, frame sizes < 256 are not stored.
case 1: f.ContentSize -= 256 dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 332e51fe..8adfebb0 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error { if v == -1 { s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) + v = 1 } + symbolNext[i] = uint16(v) } } @@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error { for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { + for { // lowprob area position = (position + step) & tableMask + if position <= highThreshold { + break + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 17901e08..ae7d4d32 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -162,12 +162,12 @@ finalize: MOVD h, ret+24(FP) RET -// func writeBlocks(d *Digest, b []byte) int +// func writeBlocks(s *Digest, b []byte) int TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest + MOVD s+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s index 9a7655c0..0782b86e 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s @@ -5,7 +5,6 @@ #include "textflag.h" // func matchLen(a []byte, b []byte) int -// Requires: BMI TEXT ·matchLen(SB), NOSPLIT, $0-56 MOVQ a_base+0(FP), AX MOVQ b_base+24(FP), CX @@ -17,17 +16,16 @@ TEXT ·matchLen(SB), NOSPLIT, $0-56 JB matchlen_match4_standalone matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - TESTQ BX, BX - JZ matchlen_loop_standalone + MOVQ (AX)(SI*1), BX + XORQ (CX)(SI*1), BX + JZ matchlen_loop_standalone #ifdef GOAMD64_v3 TZCNTQ BX, BX #else BSFQ BX, BX #endif - SARQ $0x03, BX + SHRL $0x03, BX LEAL (SI)(BX*1), SI JMP gen_match_len_end diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd82..c59f17e0 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", 
errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 974b9972..f5591fa1 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1826,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // 
Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2391,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -2914,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3581,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update 
Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc73..066bef2a 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -88,6 +88,10 @@ var ( // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE index dd878a30..b9cc55ab 100644 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format http://github.com/golang/protobuf/ Copyright 2010 The Go Authors See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE new file mode 100644 index 00000000..65d761bc --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 00000000..8547c8df
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+	"net/http"
+	"strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+	Value string
+	Q     float64
+}
+
+// ParseAccept parses Accept* headers.
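+//
+// A hedged illustration, assuming a request header of
+//
+//	Accept-Encoding: gzip;q=0.8, identity;q=0.5
+//
+// ParseAccept(r.Header, "Accept-Encoding") would yield
+// []AcceptSpec{{Value: "gzip", Q: 0.8}, {Value: "identity", Q: 0.5}}.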
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { +loop: + for _, s := range header[key] { + for { + var spec AcceptSpec + spec.Value, s = expectTokenSlash(s) + if spec.Value == "" { + continue loop + } + spec.Q = 1.0 + s = skipSpace(s) + if strings.HasPrefix(s, ";") { + s = skipSpace(s[1:]) + if !strings.HasPrefix(s, "q=") { + continue loop + } + spec.Q, s = expectQuality(s[2:]) + if spec.Q < 0.0 { + continue loop + } + } + specs = append(specs, spec) + s = skipSpace(s) + if !strings.HasPrefix(s, ",") { + continue loop + } + s = skipSpace(s[1:]) + } + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectTokenSlash(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + b := s[i] + if (octetTypes[b]&isToken == 0) && b != '/' { + break + } + } + return s[:i], s[i:] +} + +func expectQuality(s string) (q float64, rest string) { + switch { + case len(s) == 0: + return -1, "" + case s[0] == '0': + q = 0 + case s[0] == '1': + q = 1 + default: + return -1, "" + } + s = s[1:] + if !strings.HasPrefix(s, ".") { + return q, s + } + s = s[1:] + i := 0 + n := 0 + d := 1 + for ; i < len(s); i++ { + b := s[i] + if b < '0' || b > '9' { + break + } + n = n*10 + int(b) - '0' + d *= 10 + } + return q + float64(n)/float64(d), s[i:] +} diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go new file mode 100644 index 00000000..2e45780b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go @@ -0,0 +1,36 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +package httputil + +import ( + "net/http" + + "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header" +) + +// NegotiateContentEncoding returns the best offered content encoding for the +// request's Accept-Encoding header. If two offers match with equal weight and +// then the offer earlier in the list is preferred. If no offers are +// acceptable, then "" is returned. +func NegotiateContentEncoding(r *http.Request, offers []string) string { + bestOffer := "identity" + bestQ := -1.0 + specs := header.ParseAccept(r.Header, "Accept-Encoding") + for _, offer := range offers { + for _, spec := range specs { + if spec.Q > bestQ && + (spec.Value == "*" || spec.Value == offer) { + bestQ = spec.Q + bestOffer = offer + } + } + } + if bestQ == 0 { + bestOffer = "" + } + return bestOffer +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go index bcfa4fa1..cc4ef107 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go @@ -37,6 +37,9 @@ var ( // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. // e.g. go_sched_goroutines_goroutines MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} + // MetricsDebug allows only debug metrics to be collected from Go runtime. 
+ // e.g. go_godebug_non_default_behavior_gocachetest_events_total + MetricsDebug = GoRuntimeMetricsRule{regexp.MustCompile(`^/godebug/.*`)} ) // WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: @@ -44,7 +47,6 @@ var ( // go_memstats_alloc_bytes // go_memstats_alloc_bytes_total // go_memstats_sys_bytes -// go_memstats_lookups_total // go_memstats_mallocs_total // go_memstats_frees_total // go_memstats_heap_alloc_bytes diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go index ad9a71a5..520cbd7d 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -22,13 +22,13 @@ import ( // goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. // From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so // while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. +// populated using runtime/metrics. Those are the defaults we can't alter. func goRuntimeMemStats() memStatsMetrics { return memStatsMetrics{ { desc: NewDesc( memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", + "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, @@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", + "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, @@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", + "Number of bytes obtained from system. Equals to /memory/classes/total:byte.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, }, { desc: NewDesc( memstatNamespace("mallocs_total"), - "Total number of mallocs.", + // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric. + "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, @@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("frees_total"), - "Total number of frees.", + "Total number of heap objects frees. 
Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, @@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", + "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, @@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", + "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, @@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", + "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, @@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", + "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, @@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", + "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, @@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("heap_objects"), - "Number of allocated objects.", + "Number of currently allocated objects. Equals to /gc/heap/objects:objects.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, @@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", + "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, @@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", + "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, @@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", + "Number of bytes in use by mspan structures. 
Equals to /memory/classes/metadata/mspan/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, @@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", + "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, @@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", + "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, @@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", + "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, @@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", + "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, @@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", + "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, @@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", + "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, @@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics { }, { desc: NewDesc( memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", + "Number of heap bytes when next garbage collection will take place. 
Equals to /gc/heap/goal:bytes.", nil, nil, ), eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, @@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector { nil, nil), gcDesc: NewDesc( "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", + "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.", nil, nil), gcLastTimeDesc: NewDesc( "go_memstats_last_gc_time_seconds", diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 2d8d9f64..51174641 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -17,6 +17,7 @@ package prometheus import ( + "fmt" "math" "runtime" "runtime/metrics" @@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions { "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, }, RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, + // Recommended metrics we want by default from runtime/metrics. + {Matcher: internal.GoCollectorDefaultRuntimeMetrics}, }, } } @@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { // to fail here. This condition is tested in TestExpectedRuntimeMetrics. continue } + help := attachOriginalName(d.Description.Description, d.Name) sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] @@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { m = newBatchHistogram( NewDesc( BuildFQName(namespace, subsystem, name), - d.Description.Description, + help, nil, nil, ), @@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }, ) } else { @@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { Namespace: namespace, Subsystem: subsystem, Name: name, - Help: d.Description.Description, + Help: help, }) } metricSet = append(metricSet, m) @@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } } +func attachOriginalName(desc, origName string) string { + return fmt.Sprintf("%s Sourced from %s", desc, origName) +} + // Describe returns all descriptions of the collector. func (c *goCollector) Describe(ch chan<- *Desc) { c.base.Describe(ch) @@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 { // // This should never happen because we always populate our metric // set from the runtime/metrics package. - panic("unexpected unsupported metric") + panic("unexpected bad kind metric") default: // Unsupported metric kind. // // This should never happen because we check for this during initialization // and flag and filter metrics whose kinds we don't understand. 
- panic("unexpected unsupported metric kind") + panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind())) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index b5c8bcb3..519db348 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -440,7 +440,7 @@ type HistogramOpts struct { // constant (or any negative float value). NativeHistogramZeroThreshold float64 - // The remaining fields define a strategy to limit the number of + // The next three fields define a strategy to limit the number of // populated sparse buckets. If NativeHistogramMaxBucketNumber is left // at zero, the number of buckets is not limited. (Note that this might // lead to unbounded memory consumption if the values observed by the @@ -473,6 +473,22 @@ type HistogramOpts struct { NativeHistogramMinResetDuration time.Duration NativeHistogramMaxZeroThreshold float64 + // NativeHistogramMaxExemplars limits the number of exemplars + // that are kept in memory for each native histogram. If you leave it at + // zero, a default value of 10 is used. If no exemplars should be kept specifically + // for native histograms, set it to a negative value. (Scrapers can + // still use the exemplars exposed for classic buckets, which are managed + // independently.) + NativeHistogramMaxExemplars int + // NativeHistogramExemplarTTL is only checked once + // NativeHistogramMaxExemplars is exceeded. In that case, the + // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL. + // Otherwise, the older exemplar in the pair of exemplars that are closest + // together (on an exponential scale) is removed. + // If NativeHistogramExemplarTTL is left at its zero value, a default value of + // 5m is used. To always delete the oldest exemplar, set it to a negative value. + NativeHistogramExemplarTTL time.Duration + // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -532,6 +548,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr if opts.afterFunc == nil { opts.afterFunc = time.AfterFunc } + h := &histogram{ desc: desc, upperBounds: opts.Buckets, @@ -556,6 +573,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) + h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars) } for i, upperBound := range h.upperBounds { if i < len(h.upperBounds)-1 { @@ -725,7 +743,8 @@ type histogram struct { // resetScheduled is protected by mtx. It is true if a reset is // scheduled for a later time (when nativeHistogramMinResetDuration has // passed). - resetScheduled bool + resetScheduled bool + nativeExemplars nativeExemplars // now is for testing purposes, by default it's time.Now. now func() time.Time @@ -742,6 +761,9 @@ func (h *histogram) Observe(v float64) { h.observe(v, h.findBucket(v)) } +// ObserveWithExemplar should not be called in a high-frequency setting +// for a native histogram with configured exemplars. For this case, +// the implementation isn't lock-free and might suffer from lock contention. 
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
	i := h.findBucket(v)
	h.observe(v, i)
@@ -821,6 +843,13 @@ func (h *histogram) Write(out *dto.Metric) error {
 			Length: proto.Uint32(0),
 		}}
 	}
+
+		if h.nativeExemplars.isEnabled() {
+			h.nativeExemplars.Lock()
+			his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+			h.nativeExemplars.Unlock()
+		}
+
 	}
 	addAndResetCounts(hotCounts, coldCounts)
 	return nil
@@ -1091,8 +1120,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) {
 	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
 }
 
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is native, the exemplar will be cached into nativeExemplars,
+// which has a limit; one exemplar is removed when the limit is reached.
 func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
 	if l == nil {
 		return
@@ -1102,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
 		panic(err)
 	}
 	h.exemplars[bucket].Store(e)
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+	if doSparse {
+		h.nativeExemplars.addExemplar(e)
+	}
 }
 
 // HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -1336,6 +1371,48 @@ func MustNewConstHistogram(
 	return m
 }
 
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
 type buckSort []*dto.Bucket
 
 func (s buckSort) Len() int {
@@ -1575,3 +1652,186 @@ func addAndResetCounts(hot, cold *histogramCounts) {
 	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
 	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
 }
+
+type nativeExemplars struct {
+	sync.Mutex
+
+	// Time-to-live for exemplars; it is set to -1 if exemplars are disabled, that is, NativeHistogramMaxExemplars is below 0.
+	// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+	ttl time.Duration
+
+	exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+	return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+	if ttl == 0 {
+		ttl = 5 * time.Minute
+	}
+
+	if maxCount == 0 {
+		maxCount = 10
+	}
+
+	if maxCount < 0 {
+		maxCount = 0
+		ttl = -1
+	}
+
+	return nativeExemplars{
+		ttl:       ttl,
+		exemplars: make([]*dto.Exemplar, 0, maxCount),
+	}
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+	if !n.isEnabled() {
+		return
+	}
+
+	n.Lock()
+	defer n.Unlock()
+
+	// When the number of exemplars has not yet reached
+	// cap(n.exemplars), then
+	// insert the new exemplar directly.
+	if len(n.exemplars) < cap(n.exemplars) {
+		var nIdx int
+		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+			if *e.Value < *n.exemplars[nIdx].Value {
+				break
+			}
+		}
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+		return
+	}
+
+	if len(n.exemplars) == 1 {
+		// When the number of exemplars is 1, then
+		// replace the existing exemplar with the new exemplar.
+		n.exemplars[0] = e
+		return
+	}
+	// From this point on, the number of exemplars is greater than 1.
+
+	// When the number of exemplars exceeds the limit, remove one exemplar.
+	var (
+		ot    = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+		otIdx = -1          // Index of the exemplar with the oldest timestamp.
+
+		md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+		// The insertion point of the new exemplar in the exemplars slice after insertion.
+		// This is calculated purely based on the order of the exemplars by value.
+		// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+		nIdx = -1
+
+		// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+		// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+		// It is calculated in 3 steps:
+		//   1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+		//      That is, the following will be true (on log scale):
+		//      either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+		//      the closest values to each other from all pairs.
+		//      For example, suppose the values are distributed like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                    ^--rIdx as this is older.
+		//      Or like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                               ^--rIdx as this is older.
+		//   2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+		//   3. We check if by inserting the new exemplar we would create a closer pair at
+		//      (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+		//      keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+		rIdx = -1
+		cLog float64 // Logarithm of the current exemplar.
+		pLog float64 // Logarithm of the previous exemplar.
+	)
+
+	for i, exemplar := range n.exemplars {
+		// Find the exemplar with the oldest timestamp.
+		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+			ot = exemplar.Timestamp.AsTime()
+			otIdx = i
+		}
+
+		// Find the index at which to insert the new exemplar.
+		if nIdx == -1 && *e.Value <= *exemplar.Value {
+			nIdx = i
+		}
+
+		// Find the two closest exemplars and pick the one with the older timestamp.
+		pLog = cLog
+		cLog = math.Log(exemplar.GetValue())
+		if i == 0 {
+			continue
+		}
+		diff := math.Abs(cLog - pLog)
+		if md == -1 || diff < md {
+			// The closest exemplar pair is at index: i-1, i.
+			// Choose the exemplar with the older timestamp for replacement.
+			md = diff
+			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+				rIdx = i
+			} else {
+				rIdx = i - 1
+			}
+		}
+
+	}
+
+	// If all existing exemplars are smaller than the new exemplar,
+	// then the new exemplar should be inserted at the end.
+	if nIdx == -1 {
+		nIdx = len(n.exemplars)
+	}
+	// Here, we have the following relationships:
+	// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+	// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+		// If the oldest exemplar has expired, then replace it with the new exemplar.
+		rIdx = otIdx
+	} else {
+		// In the previous for loop, when calculating the closest pair of exemplars,
+		// we did not take into account the newly inserted exemplar.
+		// So we need to calculate with the newly inserted exemplar again.
+		elog := math.Log(e.GetValue())
+		if nIdx > 0 {
+			diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+			if diff < md {
+				// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+				//                              v--rIdx
+				// |-----------x-n-----------x----------------x----x-----|
+				//     nIdx-1--^ ^--new exemplar value
+				// Do not make the spread worse, replace nIdx-1 and not rIdx.
+				md = diff
+				rIdx = nIdx - 1
+			}
+		}
+		if nIdx < len(n.exemplars) {
+			diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+			if diff < md {
+				// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+				//                              v--rIdx
+				// |-----------x-----------n-x----------------x----x-----|
+				//     new exemplar value--^ ^--nIdx
+				// Do not make the spread worse, replace nIdx and not rIdx.
+				rIdx = nIdx
+			}
+		}
+	}
+
+	// Adjust the slice according to rIdx and nIdx.
+	switch {
+	case rIdx == nIdx:
+		n.exemplars[nIdx] = e
+	case rIdx < nIdx:
+		n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+	case rIdx > nIdx:
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go index 723b45d6..a4fa6eab 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go @@ -30,3 +30,5 @@ type GoCollectorOptions struct { RuntimeMetricSumForHist map[string]string RuntimeMetricRules []GoCollectorRule } + +var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index f018e572..9d9b81ab 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -234,7 +234,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { ) for i, e := range exemplars { ts := e.Timestamp - if ts == (time.Time{}) { + if ts.IsZero() { ts = now } exs[i], err = newExemplar(e.Value, ts, e.Labels) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 8548dd18..62a4e7ad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -22,14 +22,15 @@ import ( ) type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc + inBytes, outBytes *Desc } // ProcessCollectorOpts defines the behavior of a process metrics collector @@ -100,6 +101,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { "Start time of the process since unix epoch in seconds.", nil, nil, ), + inBytes: NewDesc( + ns+"process_network_receive_bytes_total", + "Number of bytes received by the process over the network.", + nil, nil, + ), + outBytes: NewDesc( + ns+"process_network_transmit_bytes_total", + "Number of bytes sent by the process over the network.", + nil, nil, + ), } if opts.PidFn == nil { @@ -129,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
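The two hunks around this point add process network metrics: the Describe change above advertises the new process_network_receive_bytes_total and process_network_transmit_bytes_total descriptors, and the procfs-based collection follows in process_collector_other.go below. A minimal sketch of how a consumer of this vendored module would surface them (not part of the patch; the registry setup and port are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// After this update, the process collector additionally exports the two
	// network counters on platforms where procfs Netstat data is available.
	reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))

	// The Go collector now also enables GoCollectorDefaultRuntimeMetrics
	// (/gc/gogc:percent, /gc/gomemlimit:bytes, /sched/gomaxprocs:threads) by default.
	reg.MustRegister(collectors.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":2112", nil)
}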
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 8c1136ce..14d56d2d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -63,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
 	} else {
 		c.reportError(ch, nil, err)
 	}
+
+	if netstat, err := p.Netstat(); err == nil {
+		var inOctets, outOctets float64
+		if netstat.IpExt.InOctets != nil {
+			inOctets = *netstat.IpExt.InOctets
+		}
+		if netstat.IpExt.OutOctets != nil {
+			outOctets = *netstat.IpExt.OutOctets
+		}
+		ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+		ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+	} else {
+		c.reportError(ch, nil, err)
+	}
 }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index 9819917b..315eab5f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
 	return n, err
 }
 
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+	return r.ResponseWriter
+}
+
 type (
 	closeNotifierDelegator struct{ *responseWriterDelegator }
 	flusherDelegator       struct{ *responseWriterDelegator }
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index 09b8d2fb..e598e66e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -38,12 +38,13 @@ import (
 	"io"
 	"net/http"
 	"strconv"
-	"strings"
 	"sync"
 	"time"
 
+	"github.com/klauspost/compress/zstd"
 	"github.com/prometheus/common/expfmt"
 
+	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
 	"github.com/prometheus/client_golang/prometheus"
 )
 
@@ -54,6 +55,18 @@ const (
 	processStartTimeHeader = "Process-Start-Time-Unix"
 )
 
+// Compression represents the content encodings that handlers support for the
+// HTTP responses.
+type Compression string
+
+const (
+	Identity Compression = "identity"
+	Gzip     Compression = "gzip"
+	Zstd     Compression = "zstd"
+)
+
+var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+
 var gzipPool = sync.Pool{
 	New: func() interface{} {
 		return gzip.NewWriter(nil)
@@ -122,6 +135,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 		}
 	}
 
+	// Select compression formats to offer based on default or user choice.
+	var compressions []string
+	if !opts.DisableCompression {
+		offers := defaultCompressionFormats
+		if len(opts.OfferedCompressions) > 0 {
+			offers = opts.OfferedCompressions
+		}
+		for _, comp := range offers {
+			compressions = append(compressions, string(comp))
+		}
+	}
+
 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
 		if !opts.ProcessStartTime.IsZero() {
 			rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
@@ -165,21 +190,23 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 		} else {
 			contentType = expfmt.Negotiate(req.Header)
 		}
-		header := rsp.Header()
-		header.Set(contentTypeHeader, string(contentType))
+		rsp.Header().Set(contentTypeHeader, string(contentType))
 
-		w := io.Writer(rsp)
-		if !opts.DisableCompression && gzipAccepted(req.Header) {
-			header.Set(contentEncodingHeader, "gzip")
-			gz := gzipPool.Get().(*gzip.Writer)
-			defer gzipPool.Put(gz)
+		w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error getting writer", err)
+			}
+			w = io.Writer(rsp)
+			encodingHeader = string(Identity)
+		}
 
-			gz.Reset(w)
-			defer gz.Close()
+		defer closeWriter()
 
-			w = gz
+		// Set Content-Encoding only when data is compressed
+		if encodingHeader != string(Identity) {
+			rsp.Header().Set(contentEncodingHeader, encodingHeader)
 		}
-
 		enc := expfmt.NewEncoder(w, contentType)
 
 		// handleError handles the error according to opts.ErrorHandling
@@ -343,9 +370,19 @@ type HandlerOpts struct {
 	// no effect on the HTTP status code because ErrorHandling is set to
 	// ContinueOnError.
 	Registry prometheus.Registerer
-	// If DisableCompression is true, the handler will never compress the
-	// response, even if requested by the client.
+	// DisableCompression disables the response encoding (compression) and
+	// encoding negotiation. If true, the handler will
+	// never compress the response, even if requested
+	// by the client and the OfferedCompressions field is set.
	DisableCompression bool
+	// OfferedCompressions is a set of encodings (compressions) the handler
+	// will try to offer when negotiating with the client. This defaults to
+	// identity, gzip and zstd.
+	// NOTE: If the handler can't agree with the client on the encodings, or
+	// if unsupported or empty encodings are set in OfferedCompressions,
+	// the handler always falls back to no compression (identity), for
+	// compatibility reasons. In such cases ErrorLog will be used if set.
+	OfferedCompressions []Compression
 	// The number of concurrent HTTP requests is limited to
 	// MaxRequestsInFlight. Additional requests are responded to with 503
 	// Service Unavailable and a suitable message in the body. If
@@ -381,19 +418,6 @@ type HandlerOpts struct {
 	ProcessStartTime time.Time
 }
 
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
-	a := header.Get(acceptEncodingHeader)
-	parts := strings.Split(a, ",")
-	for _, part := range parts {
-		part = strings.TrimSpace(part)
-		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-			return true
-		}
-	}
-	return false
-}
-
 // httpError removes any content-encoding header and then calls http.Error with
 // the provided error and http.StatusInternalServerError. Error contents is
 // supposed to be uncompressed plain text. Same as with a plain http.Error, this
@@ -406,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) {
 		http.StatusInternalServerError,
 	)
 }
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// correct value that the caller can set in the response header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+	if len(compressions) == 0 {
+		return rw, string(Identity), func() {}, nil
+	}
+
+	// TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+	selected := httputil.NegotiateContentEncoding(r, compressions)
+
+	switch selected {
+	case "zstd":
+		// TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
+		z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
+		if err != nil {
+			return nil, "", func() {}, err
+		}
+
+		z.Reset(rw)
+		return z, selected, func() { _ = z.Close() }, nil
+	case "gzip":
+		gz := gzipPool.Get().(*gzip.Writer)
+		gz.Reset(rw)
+		return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+	case "identity":
+		// This means the content is not compressed.
+		return rw, selected, func() {}, nil
+	default:
+		// The content encoding is not implemented yet.
+		return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 5e2ced25..c6fd2f58 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
 			if dimHash != desc.dimHash {
 				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
 			}
-		} else {
-			// ...then check the new descriptors already seen.
-			if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
-				if dimHash != desc.dimHash {
-					return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
-				}
-			} else {
-				newDimHashesByName[desc.fqName] = desc.dimHash
+			continue
+		}
+
+		// ...then check the new descriptors already seen.
+		if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
 			}
+			continue
 		}
+		newDimHashesByName[desc.fqName] = desc.dimHash
 	}
 
 	// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 14627044..1ab0e479 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -783,3 +783,45 @@ func MustNewConstSummary( } return m } + +// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp. +func NewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: MakeLabelPairs(desc, labelValues), + createdTs: timestamppb.New(ct), + }, nil +} + +// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where +// NewConstSummaryWithCreatedTimestamp would have returned an error. +func MustNewConstSummaryWithCreatedTimestamp( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + ct time.Time, + labelValues ...string, +) Metric { + m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go index 955cfd59..2c808eec 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues( return metric } -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// getOrCreateMetricWithLabels retrieves the metric by hash and label value // or creates it and returns the new one. // // This function holds the mutex. diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa21..1448439b 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. 
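The summary.go hunk above mirrors the histogram change earlier in this patch: const metrics can now carry an explicit created timestamp. A short sketch of the new constructors in use (not part of the patch; the descriptor and values are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// jobDesc describes a metric bridged from an external system.
var jobDesc = prometheus.NewDesc(
	"batch_job_duration_seconds",
	"Duration of batch job runs, bridged from an external scheduler.",
	[]string{"job"}, nil,
)

func main() {
	// Passing the series' original creation time lets created-timestamp-aware
	// scrapers distinguish counter resets from genuinely new series.
	created := time.Now().Add(-time.Hour)
	m := prometheus.MustNewConstSummaryWithCreatedTimestamp(
		jobDesc,
		42,    // observation count
		123.4, // sum of observations
		map[float64]float64{0.5: 2.5, 0.9: 4.0}, // quantiles
		created,
		"nightly-export", // value for the "job" label
	)
	fmt.Println(m.Desc())
}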
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9..cf0c150c 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd..d942af8e 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). 
- fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, fmt.Errorf("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. 
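The expfmt.go hunk above re-exports the previously unexported fmt* format constants as deprecated Fmt* aliases and adds Format.WithEscapingScheme. A brief sketch of the intended replacement path (not part of the patch; the printed content type is indicative, assuming the OpenMetrics 1.0.0 default shown above):

package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Preferred over comparing against the deprecated Fmt* constants:
	// construct formats through NewFormat...
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics)

	// ...and attach an escaping scheme via the new helper; any existing
	// "escaping" term in the content type is replaced.
	f = f.WithEscapingScheme(model.UnderscoreEscaping)

	// Prints something like:
	// application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=underscores
	fmt.Println(f)
}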
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93..11c8ff4b 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a..4b86434b 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211..f085a923 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). 
func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. 
labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22..73b7aa3e 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. 
func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b4..abb2c900 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685..00000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a..f50966bc 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -34,10 +34,13 @@ var ( // goroutines are started. NameValidationScheme = LegacyValidation - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. 
If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +164,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +179,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +211,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +233,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +243,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,10 +259,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } @@ -283,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -309,7 +312,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") @@ -452,6 +455,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, 
fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/stoewer/go-strcase/.gitignore b/vendor/github.com/stoewer/go-strcase/.gitignore new file mode 100644 index 00000000..db5247b9 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/.gitignore @@ -0,0 +1,17 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +vendor +doc + +# Temporary files +*~ +*.swp + +# Editor and IDE config +.idea +*.iml +.vscode diff --git a/vendor/github.com/stoewer/go-strcase/.golangci.yml b/vendor/github.com/stoewer/go-strcase/.golangci.yml new file mode 100644 index 00000000..7f98d55c --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/.golangci.yml @@ -0,0 +1,26 @@ +run: + deadline: 10m + +linters: + enable: + - dupl + - goconst + - gocyclo + - godox + - gosec + - interfacer + - lll + - maligned + - misspell + - prealloc + - stylecheck + - unconvert + - unparam + - errcheck + - golint + - gofmt + disable: [] + fast: false + +issues: + exclude-use-default: false diff --git a/vendor/github.com/stoewer/go-strcase/LICENSE b/vendor/github.com/stoewer/go-strcase/LICENSE new file mode 100644 index 00000000..a105a381 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017, Adrian Stoewer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stoewer/go-strcase/README.md b/vendor/github.com/stoewer/go-strcase/README.md new file mode 100644 index 00000000..84a640e7 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/README.md @@ -0,0 +1,50 @@ +[![GH Actions](https://github.com/stoewer/go-strcase/actions/workflows/lint-test.yml/badge.svg?branch=master)](https://github.com/stoewer/go-strcase/actions) +[![codecov](https://codecov.io/github/stoewer/go-strcase/branch/master/graph/badge.svg?token=c0UokYnop5)](https://codecov.io/github/stoewer/go-strcase) +[![GoDoc](https://godoc.org/github.com/stoewer/go-strcase?status.svg)](https://pkg.go.dev/github.com/stoewer/go-strcase) +--- + +Go strcase +========== + +The package `strcase` converts between different kinds of naming formats such as camel case +(`CamelCase`), snake case (`snake_case`) or kebab case (`kebab-case`). +The package is designed to work only with strings consisting of standard ASCII letters. +Unicode is currently not supported. 
+ +Versioning and stability +------------------------ + +Although the master branch is supposed to remain always backward compatible, the repository +contains version tags in order to support vendoring tools. +The tag names follow semantic versioning conventions and have the following format `v1.0.0`. +This package supports Go modules introduced with version 1.11. + +Example +------- + +```go +import "github.com/stoewer/go-strcase" + +var snake = strcase.SnakeCase("CamelCase") +``` + +Dependencies +------------ + +### Build dependencies + +* none + +### Test dependencies + +* `github.com/stretchr/testify` + +Run linters and unit tests +-------------------------- + +To run the static code analysis, linters and tests use the following commands: + +``` +golangci-lint run --config .golangci.yml ./... +go test ./... +``` diff --git a/vendor/github.com/stoewer/go-strcase/camel.go b/vendor/github.com/stoewer/go-strcase/camel.go new file mode 100644 index 00000000..ff9e66e0 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/camel.go @@ -0,0 +1,40 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +package strcase + +import ( + "strings" +) + +// UpperCamelCase converts a string into camel case starting with an upper case letter. +func UpperCamelCase(s string) string { + return camelCase(s, true) +} + +// LowerCamelCase converts a string into camel case starting with a lower case letter. +func LowerCamelCase(s string) string { + return camelCase(s, false) +} + +func camelCase(s string, upper bool) string { + s = strings.TrimSpace(s) + buffer := make([]rune, 0, len(s)) + + stringIter(s, func(prev, curr, next rune) { + if !isDelimiter(curr) { + if isDelimiter(prev) || (upper && prev == 0) { + buffer = append(buffer, toUpper(curr)) + } else if isLower(prev) { + buffer = append(buffer, curr) + } else if isUpper(prev) && isUpper(curr) && isLower(next) { + // Assume a case like "R" for "XRequestId" + buffer = append(buffer, curr) + } else { + buffer = append(buffer, toLower(curr)) + } + } + }) + + return string(buffer) +} diff --git a/vendor/github.com/stoewer/go-strcase/doc.go b/vendor/github.com/stoewer/go-strcase/doc.go new file mode 100644 index 00000000..3e441ca3 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/doc.go @@ -0,0 +1,8 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +// Package strcase converts between different kinds of naming formats such as camel case +// (CamelCase), snake case (snake_case) or kebab case (kebab-case). The package is designed +// to work only with strings consisting of standard ASCII letters. Unicode is currently not +// supported. +package strcase diff --git a/vendor/github.com/stoewer/go-strcase/helper.go b/vendor/github.com/stoewer/go-strcase/helper.go new file mode 100644 index 00000000..ecad5891 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/helper.go @@ -0,0 +1,71 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +package strcase + +// isLower checks if a character is lower case. More precisely it evaluates if it is +// in the range of ASCII characters 'a' to 'z'. +func isLower(ch rune) bool { + return ch >= 'a' && ch <= 'z' +} + +// toLower converts a character in the range of ASCII characters 'A' to 'Z' to its lower +// case counterpart. Other characters remain the same. +func toLower(ch rune) rune { + if ch >= 'A' && ch <= 'Z' { + return ch + 32 + } + return ch +} + +// isUpper checks if a character is upper case.
More precisely it evaluates if it is +// in the range of ASCII characters 'A' to 'Z'. +func isUpper(ch rune) bool { + return ch >= 'A' && ch <= 'Z' +} + +// toUpper converts a character in the range of ASCII characters 'a' to 'z' to its upper +// case counterpart. Other characters remain the same. +func toUpper(ch rune) rune { + if ch >= 'a' && ch <= 'z' { + return ch - 32 + } + return ch +} + +// isSpace checks if a character is some kind of whitespace. +func isSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' +} + +// isDelimiter checks if a character is some kind of whitespace or '_' or '-'. +func isDelimiter(ch rune) bool { + return ch == '-' || ch == '_' || isSpace(ch) +} + +// iterFunc is a callback that is called for a specific position in a string. Its arguments are the +// rune at the respective string position as well as the previous and the next rune. If curr is at the +// first position of the string prev is zero. If curr is at the end of the string next is zero. +type iterFunc func(prev, curr, next rune) + +// stringIter iterates over a string, invoking the callback for every single rune in the string. +func stringIter(s string, callback iterFunc) { + var prev rune + var curr rune + for _, next := range s { + if curr == 0 { + prev = curr + curr = next + continue + } + + callback(prev, curr, next) + + prev = curr + curr = next + } + + if len(s) > 0 { + callback(prev, curr, 0) + } +} diff --git a/vendor/github.com/stoewer/go-strcase/kebab.go b/vendor/github.com/stoewer/go-strcase/kebab.go new file mode 100644 index 00000000..e9a64875 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/kebab.go @@ -0,0 +1,14 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +package strcase + +// KebabCase converts a string into kebab case. +func KebabCase(s string) string { + return delimiterCase(s, '-', false) +} + +// UpperKebabCase converts a string into kebab case with capital letters. +func UpperKebabCase(s string) string { + return delimiterCase(s, '-', true) +} diff --git a/vendor/github.com/stoewer/go-strcase/snake.go b/vendor/github.com/stoewer/go-strcase/snake.go new file mode 100644 index 00000000..1b216e20 --- /dev/null +++ b/vendor/github.com/stoewer/go-strcase/snake.go @@ -0,0 +1,58 @@ +// Copyright (c) 2017, A. Stoewer +// All rights reserved. + +package strcase + +import ( + "strings" +) + +// SnakeCase converts a string into snake case. +func SnakeCase(s string) string { + return delimiterCase(s, '_', false) +} + +// UpperSnakeCase converts a string into snake case with capital letters. +func UpperSnakeCase(s string) string { + return delimiterCase(s, '_', true) +} + +// delimiterCase converts a string into snake_case or kebab-case depending on the delimiter passed +// as second argument. When upperCase is true the result will be UPPER_SNAKE_CASE or UPPER-KEBAB-CASE.
+func delimiterCase(s string, delimiter rune, upperCase bool) string { + s = strings.TrimSpace(s) + buffer := make([]rune, 0, len(s)+3) + + adjustCase := toLower + if upperCase { + adjustCase = toUpper + } + + var prev rune + var curr rune + for _, next := range s { + if isDelimiter(curr) { + if !isDelimiter(prev) { + buffer = append(buffer, delimiter) + } + } else if isUpper(curr) { + if isLower(prev) || (isUpper(prev) && isLower(next)) { + buffer = append(buffer, delimiter) + } + buffer = append(buffer, adjustCase(curr)) + } else if curr != 0 { + buffer = append(buffer, adjustCase(curr)) + } + prev = curr + curr = next + } + + if len(s) > 0 { + if isUpper(curr) && isLower(prev) && prev != 0 { + buffer = append(buffer, delimiter) + } + buffer = append(buffer, adjustCase(curr)) + } + + return string(buffer) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 214acaf5..a83a0262 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,20 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. -const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index f0a9bb9e..a01bfafb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -33,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -194,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. 
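A sketch of how a caller could use the `WithMetricAttributesFn` option defined just below; in this patch the hook is consumed by the client `Transport` (see `metricAttributesFromRequest` later in the diff), and the attribute chosen here is purely illustrative:

```go
package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	client := http.Client{
		Transport: otelhttp.NewTransport(
			http.DefaultTransport,
			// Stamp every request's metric data points with an extra attribute.
			otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
				return []attribute.KeyValue{attribute.String("peer.host", r.URL.Host)}
			}),
		),
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		return
	}
	resp.Body.Close()
}
```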
+func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index d01bdccf..e4236ab3 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -9,11 +9,9 @@ import ( "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -24,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -34,10 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - traceSemconv semconv.HTTPServer - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, - - traceSemconv: semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) 
h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -88,36 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName -} - -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) + h.semconv = semconv.NewHTTPServer(c.Meter) } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -134,7 +95,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } opts = append(opts, h.spanStartOptions...) @@ -166,14 +127,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -183,13 +142,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,35 +170,39 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http next.ServeHTTP(w, r.WithContext(ctx)) - span.SetStatus(semconv.ServerStatus(rww.statusCode)) - span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ - StatusCode: rww.statusCode, - ReadBytes: bw.read.Load(), - ReadError: bw.err, - WriteBytes: rww.written, - WriteError: rww.err, + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), })...) - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) - h.responseBytesCounter.Add(ctx, rww.written, addOpts...) - // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + }, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
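For `WithRouteTag`, defined next, a minimal usage sketch (the route pattern and handler are illustrative; the point is to record a low-cardinality route rather than the raw URL path):

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	// Annotate spans and metrics for this subtree with a fixed route.
	mux.Handle("/users/", otelhttp.WithRouteTag("/users/:id", http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "ok") },
	)))
	log.Fatal(http.ListenAndServe(":8080", otelhttp.NewHandler(mux, "server")))
}
```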
func WithRouteTag(route string, h http.Handler) http.Handler { - attr := semconv.NewHTTPServer().Route(route) + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 00000000..a945f556 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Close closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 00000000..fbc344cb --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,119 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations.
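Both wrappers in this new `internal/request` package follow the same count-and-report-through-a-callback pattern. Since the package is internal and cannot be imported from outside the module, here is a self-contained sketch of the idea:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// countingReader mirrors the BodyWrapper pattern: it forwards reads to the
// wrapped Reader and reports each byte count to a callback.
type countingReader struct {
	io.Reader
	onRead func(n int64)
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.Reader.Read(p)
	c.onRead(int64(n))
	return n, err
}

func main() {
	var total int64
	r := &countingReader{
		Reader: strings.NewReader("hello"),
		onRead: func(n int64) { total += n },
	}
	_, _ = io.Copy(io.Discard, r)
	fmt.Println(total) // 5
}
```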
+type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.wroteHeader { + w.writeHeader(http.StatusOK) + } + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// StatusCode returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error.
+func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 3ec0ad00..fb893b25 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -4,6 +4,7 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "context" "fmt" "net/http" "os" @@ -11,6 +12,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" ) type ResponseTelemetry struct { @@ -23,6 +25,11 @@ type ResponseTelemetry struct { type HTTPServer struct { duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue { return oldHTTPServer{}.Route(route) } -func NewHTTPServer() HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) - return HTTPServer{duplicate: env == "http/dup"} -} - -// ServerStatus returns a span status code and message for an HTTP status code +// Status returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. -func ServerStatus(code int) (codes.Code, string) { +func (s HTTPServer) Status(code int) (codes.Code, string) { if code < 100 || code >= 600 { return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) } @@ -80,3 +82,146 @@ return codes.Unset, "" } + +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue +} + +type MetricData struct { + RequestSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used instead of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram +} + +func NewHTTPClient(meter metric.Meter) HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + client := HTTPClient{ + duplicate: env == "http/dup", + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + return client +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { + attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + // TODO: Duplicate Metrics + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + return MetricOpts{ + measurement: set, + addOptions: set, + } +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + + // TODO: Duplicate Metrics +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used instead of NewHTTPClient().
+ return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go similarity index 57% rename from vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 0c5d4c46..745b8c67 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -4,11 +4,14 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "fmt" "net/http" + "reflect" + "strconv" "strings" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) type newHTTPServer struct{} @@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke func (n newHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. +func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. + + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. 
+ req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e7f29376..e6e14924 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -9,8 +9,9 @@ import ( "strconv" "strings" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Byte size checked 16 above. 
} func requiredHTTPPort(https bool, port int) int { // nolint:revive @@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{ http.MethodPut: semconvNew.HTTPRequestMethodPut, http.MethodTrace: semconvNew.HTTPRequestMethodTrace, } + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c3e838aa..5367732e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -7,9 +7,13 @@ import ( "errors" "io" "net/http" + "slices" + "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) @@ -72,3 +76,199 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue { func HTTPStatusCode(status int) attribute.KeyValue { return semconv.HTTPStatusCode(status) } + +// Server HTTP metrics. +const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} + +func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
+const ( + clientRequestSize = "http.client.request.size" // Outgoing request bytes total + clientResponseSize = "http.client.response.size" // Outgoing response bytes total + clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds +) + +func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} + +func standardizeHTTPMethodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index a9a9226b..b80a1db6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
} func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 0d3cb2e4..39681ad4 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,13 +11,13 @@ import ( "sync/atomic" "time" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) @@ -26,17 +26,15 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace - - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -63,43 +61,19 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace -} - -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) + t.semconv = semconv.NewHTTPClient(c.Meter) + t.metricAttributesFn = c.MetricAttributesFn } func defaultTransportFormatter(_ string, r *http.Request) string { @@ -143,54 +117,68 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. 
- // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) - if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) + // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, addOpts...) + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil +} + +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest } // newWrappedBody returns a new and appropriately scoped *wrappedBody as an diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index b0957f28..a07d8689 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.53.0" + return "0.56.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 948f8406..00000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. -func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} - -func (w *respWriterWrapper) Flush() { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - - if f, ok := w.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b64..d0955550 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint @@ -23,6 +25,7 @@ linters: - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused @@ -62,12 +65,12 @@ issues: - path: _test\.go linters: - gosec - # Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) + # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand) # as we commonly use it in tests and examples. - text: "G404:" linters: - gosec - # Igonoring gosec G402: TLS MinVersion too low + # Ignoring gosec G402: TLS MinVersion too low # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well. - text: "G402: TLS MinVersion too low." linters: @@ -300,3 +303,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998..4b361d02 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,112 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + +<!-- Released section --> +<!-- Don't change this section unless doing release --> +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor. (#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary.
(#5827) + +## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 + +### Added + +- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739) +- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773) +- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773) +- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755) + +### Fixed + +- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754) +- Fix panic on instruments creation when setting meter provider. (#5758) +- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780) + +### Removed + +- Drop support for [Go 1.21]. (#5736, #5740, #5800) + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. 
+ See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +155,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. 
(#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +282,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -1836,7 +1943,7 @@ with major version 0. - Setting error status while recording error with Span from oteltest package. (#1729) - The concept of a remote and local Span stored in a context is unified to just the current Span. Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span. If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) - The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) @@ -2410,7 +2517,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco - Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) - Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) - Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) +- Update otel-collector example to use the v0.5.0 collector. (#915) - The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) - The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) - The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. @@ -3003,7 +3110,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 +[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3196,9 @@ It contains api and sdk for trace and meter. 
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 20255493..945a07d2 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,13 +5,13 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f5..bb339655 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -578,7 +578,10 @@ See also: The tests should never leak goroutines. Use the term `ConcurrentSafe` in the test name when it aims to verify the -absence of race conditions. +absence of race conditions. The top-level tests with this term will be run +many times in the `test-concurrent-safe` CI job to increase the chance of +catching concurrency issues. This does not apply to subtests when this term +is not in their root name. ### Internal packages @@ -628,11 +631,8 @@ should be canceled. ### Approvers -- [Chester Cheung](https://github.com/hanyuancheung), Tencent - ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -641,16 +641,18 @@ should be canceled. ### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). 
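As an illustration of the `ConcurrentSafe` test naming convention described earlier in this file, a hypothetical top-level test might look like the following (the counter under test is invented for the example):

```go
package example

import (
	"sync"
	"testing"
)

// TestCounterConcurrentSafe is picked up by the test-concurrent-safe CI job
// (-run=ConcurrentSafe -count=100 -race) because of its name.
func TestCounterConcurrentSafe(t *testing.T) {
	var (
		mu sync.Mutex
		n  int
		wg sync.WaitGroup
	)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			mu.Lock()
			n++
			mu.Unlock()
		}()
	}
	wg.Wait()
	if n != 10 {
		t.Fatalf("got %d increments, want 10", n)
	}
}
```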
[Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f7..a1228a21 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker @@ -145,12 +142,14 @@ build-tests/%: # Tests -TEST_TARGETS := test-default test-bench test-short test-verbose test-race +TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe .PHONY: $(TEST_TARGETS) test test-default test-race: ARGS=-race test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-short: ARGS=-short test-verbose: ARGS=-v -race +test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race +test-concurrent-safe: TIMEOUT=120 $(TEST_TARGETS): test test: $(OTEL_GO_MOD_DIRS:%=test/%) test/%: DIR=$* @@ -178,17 +177,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. .PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a890931..efec2789 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,22 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. 
-| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -87,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3..ffa9b612 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`). + - Make sure the new section is under the comment for released section, like `<!-- Released section -->`, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. @@ -110,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `<new tag>` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy.
- ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fd..6cbefcea 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896c..36f53670 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be a valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended for use as Property keys. func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key.
+// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended for use as Property keys. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It returns an empty string if the key does not comply with the W3C Baggage +// specification. This can happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignore empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be a valid, non-empty UTF-8 string. +// The passed value must be a valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alphanumeric values are strongly recommended for use as baggage keys.
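+// For example (an illustrative sketch, not part of the upstream documentation),
+// a non-ASCII UTF-8 key is accepted here even though it cannot be encoded into
+// a W3C baggage header; Member.String returns "" for such keys:
+//
+//	m, err := NewMemberRaw("ключ", "value") // err == nil; key is valid UTF-8
+//	_ = m.String()                          // "" because "ключ" is not a valid W3C key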
func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It returns an empty string if the key does not comply with the W3C Baggage +// specification. This can happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is + replaced.
// // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It omits members whose key does not comply with the W3C Baggage +// specification. This can happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignore empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// A Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// A Baggage value is a valid UTF-8 string. +// The empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a..2acbac35 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 441c5950..921f8596 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage.
*/ diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md new file mode 100644 index 00000000..50802d5a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -0,0 +1,3 @@ +# OTLP Trace Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go new file mode 100644 index 00000000..3c1a625c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// Client manages connections to the collector, handles the +// transformation of data into wire format, and the transmission of that +// data to the collector. +type Client interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Start should establish connection(s) to endpoint(s). It is + // called just once by the exporter, so the implementation + // does not need to worry about idempotence and locking. + Start(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Stop should close the connections. The function is called + // only once by the exporter, so the implementation does not + // need to worry about idempotence, but it may be called + // concurrently with UploadTraces, so proper + // locking is required. The function serves as a + // synchronization point - after the function returns, the + // process of closing connections is assumed to be finished. + Stop(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // UploadTraces should transform the passed traces to the wire + // format and send it to the collector. May be called + // concurrently. + UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go new file mode 100644 index 00000000..09ad5ead --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package otlptrace contains abstractions for OTLP span exporters. 
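For a sense of the Client contract defined in clients.go above, a minimal
no-op implementation might look like the following sketch (noopClient is
invented for illustration; it is not one of the official clients):

	type noopClient struct{}

	func (noopClient) Start(ctx context.Context) error { return nil }
	func (noopClient) Stop(ctx context.Context) error  { return nil }
	func (noopClient) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
		return nil // a real client would marshal and transmit the spans here
	}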
+See the official OTLP span exporter implementations: + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc], + - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]. +*/ +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go new file mode 100644 index 00000000..3f0a518a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -0,0 +1,105 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + tracesdk "go.opentelemetry.io/otel/sdk/trace" +) + +var errAlreadyStarted = errors.New("already started") + +// Exporter exports trace data in the OTLP wire format. +type Exporter struct { + client Client + + mu sync.RWMutex + started bool + + startOnce sync.Once + stopOnce sync.Once +} + +// ExportSpans exports a batch of spans. +func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error { + protoSpans := tracetransform.Spans(ss) + if len(protoSpans) == 0 { + return nil + } + + err := e.client.UploadTraces(ctx, protoSpans) + if err != nil { + return fmt.Errorf("traces export: %w", err) + } + return nil +} + +// Start establishes a connection to the receiving endpoint. +func (e *Exporter) Start(ctx context.Context) error { + err := errAlreadyStarted + e.startOnce.Do(func() { + e.mu.Lock() + e.started = true + e.mu.Unlock() + err = e.client.Start(ctx) + }) + + return err +} + +// Shutdown flushes all exports and closes all connections to the receiving endpoint. +func (e *Exporter) Shutdown(ctx context.Context) error { + e.mu.RLock() + started := e.started + e.mu.RUnlock() + + if !started { + return nil + } + + var err error + + e.stopOnce.Do(func() { + err = e.client.Stop(ctx) + e.mu.Lock() + e.started = false + e.mu.Unlock() + }) + + return err +} + +var _ tracesdk.SpanExporter = (*Exporter)(nil) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, client Client) (*Exporter, error) { + exp := NewUnstarted(client) + if err := exp.Start(ctx); err != nil { + return nil, err + } + return exp, nil +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(client Client) *Exporter { + return &Exporter{ + client: client, + } +} + +// MarshalLog is the marshaling function used by the logging system to represent this Exporter. 
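// A typical way to construct and register the Exporter defined above (a
// sketch: the otlptracegrpc client and the sdktrace/otel packages are
// assumed imports, not part of this file):
//
//	client := otlptracegrpc.NewClient()
//	exp, err := otlptrace.New(ctx, client)
//	if err != nil {
//		return err
//	}
//	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
//	defer func() { _ = tp.Shutdown(ctx) }()
//	otel.SetTracerProvider(tp)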
+func (e *Exporter) MarshalLog() interface{} { + return struct { + Type string + Client Client + }{ + Type: "otlptrace", + Client: e.client, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go new file mode 100644 index 00000000..4571a5ca --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -0,0 +1,147 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. +func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, KeyValue(kv)) + } + return out +} + +// Iterator transforms an attribute iterator into OTLP key-values. +func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*commonpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, KeyValue(iter.Attribute())) + } + return out +} + +// ResourceAttributes transforms a Resource into OTLP key-values. +func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { + return Iterator(res.Iter()) +} + +// KeyValue transforms an attribute KeyValue into an OTLP key-value. +func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue { + return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} +} + +// Value transforms an attribute Value into an OTLP AnyValue.
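+// For example (an illustrative sketch), a boolean attribute maps onto the
+// BoolValue arm of the AnyValue oneof:
+//
+//	av := Value(attribute.BoolValue(true))
+//	_, ok := av.GetValue().(*commonpb.AnyValue_BoolValue) // ok == true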
+func Value(v attribute.Value) *commonpb.AnyValue { + av := new(commonpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &commonpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &commonpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &commonpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &commonpb.AnyValue_ArrayValue{ + ArrayValue: &commonpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &commonpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*commonpb.AnyValue { + converted := make([]*commonpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &commonpb.AnyValue{ + Value: &commonpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go new file mode 100644 index 00000000..f6dd3dec --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { + if il == (instrumentation.Scope{}) { + return nil + } + return &commonpb.InstrumentationScope{ + Name: il.Name, + Version: il.Version, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go new file mode 100644 index 00000000..db7b698a --- /dev/null 
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/resource" + resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +// Resource transforms a Resource into an OTLP Resource. +func Resource(r *resource.Resource) *resourcepb.Resource { + if r == nil { + return nil + } + return &resourcepb.Resource{Attributes: ResourceAttributes(r)} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go new file mode 100644 index 00000000..bf27ef02 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -0,0 +1,219 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "math" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/instrumentation" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP +// ResourceSpans. +func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { + if len(sdl) == 0 { + return nil + } + + rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans) + + type key struct { + r attribute.Distinct + is instrumentation.Scope + } + ssm := make(map[key]*tracepb.ScopeSpans) + + var resources int + for _, sd := range sdl { + if sd == nil { + continue + } + + rKey := sd.Resource().Equivalent() + k := key{ + r: rKey, + is: sd.InstrumentationScope(), + } + scopeSpan, iOk := ssm[k] + if !iOk { + // Either the resource or instrumentation scope were unknown. + scopeSpan = &tracepb.ScopeSpans{ + Scope: InstrumentationScope(sd.InstrumentationScope()), + Spans: []*tracepb.Span{}, + SchemaUrl: sd.InstrumentationScope().SchemaURL, + } + } + scopeSpan.Spans = append(scopeSpan.Spans, span(sd)) + ssm[k] = scopeSpan + + rs, rOk := rsm[rKey] + if !rOk { + resources++ + // The resource was unknown. + rs = &tracepb.ResourceSpans{ + Resource: Resource(sd.Resource()), + ScopeSpans: []*tracepb.ScopeSpans{scopeSpan}, + SchemaUrl: sd.Resource().SchemaURL(), + } + rsm[rKey] = rs + continue + } + + // The resource has been seen before. Check if the instrumentation + // library lookup was unknown because if so we need to add it to the + // ResourceSpans. Otherwise, the instrumentation library has already + // been seen and the append we did above will be included it in the + // ScopeSpans reference. + if !iOk { + rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan) + } + } + + // Transform the categorized map into a slice + rss := make([]*tracepb.ResourceSpans, 0, resources) + for _, rs := range rsm { + rss = append(rss, rs) + } + return rss +} + +// span transforms a Span into an OTLP span. 
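+
+// To make the grouping above concrete (a sketch, with hypothetical spans):
+// three ReadOnlySpans sharing one Resource, two from scope "db" and one from
+// scope "http", batch into a single ResourceSpans holding two ScopeSpans:
+//
+//	rss := Spans([]tracesdk.ReadOnlySpan{dbSpan1, dbSpan2, httpSpan})
+//	// len(rss) == 1               one shared resource
+//	// len(rss[0].ScopeSpans) == 2 scopes "db" and "http"
+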
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span { + if sd == nil { + return nil + } + + tid := sd.SpanContext().TraceID() + sid := sd.SpanContext().SpanID() + + s := &tracepb.Span{ + TraceId: tid[:], + SpanId: sid[:], + TraceState: sd.SpanContext().TraceState().String(), + Status: status(sd.Status().Code, sd.Status().Description), + StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked. + EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked. + Links: links(sd.Links()), + Kind: spanKind(sd.SpanKind()), + Name: sd.Name(), + Attributes: KeyValues(sd.Attributes()), + Events: spanEvents(sd.Events()), + DroppedAttributesCount: clampUint32(sd.DroppedAttributes()), + DroppedEventsCount: clampUint32(sd.DroppedEvents()), + DroppedLinksCount: clampUint32(sd.DroppedLinks()), + } + + if psid := sd.Parent().SpanID(); psid.IsValid() { + s.ParentSpanId = psid[:] + } + s.Flags = buildSpanFlags(sd.Parent()) + + return s +} + +func clampUint32(v int) uint32 { + if v < 0 { + return 0 + } + if int64(v) > math.MaxUint32 { + return math.MaxUint32 + } + return uint32(v) // nolint: gosec // Overflow/Underflow checked. +} + +// status transform a span code and message into an OTLP span status. +func status(status codes.Code, message string) *tracepb.Status { + var c tracepb.Status_StatusCode + switch status { + case codes.Ok: + c = tracepb.Status_STATUS_CODE_OK + case codes.Error: + c = tracepb.Status_STATUS_CODE_ERROR + default: + c = tracepb.Status_STATUS_CODE_UNSET + } + return &tracepb.Status{ + Code: c, + Message: message, + } +} + +// links transforms span Links to OTLP span links. +func links(links []tracesdk.Link) []*tracepb.Span_Link { + if len(links) == 0 { + return nil + } + + sl := make([]*tracepb.Span_Link, 0, len(links)) + for _, otLink := range links { + // This redefinition is necessary to prevent otLink.*ID[:] copies + // being reused -- in short we need a new otLink per iteration. + otLink := otLink + + tid := otLink.SpanContext.TraceID() + sid := otLink.SpanContext.SpanID() + + flags := buildSpanFlags(otLink.SpanContext) + + sl = append(sl, &tracepb.Span_Link{ + TraceId: tid[:], + SpanId: sid[:], + Attributes: KeyValues(otLink.Attributes), + DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount), + Flags: flags, + }) + } + return sl +} + +func buildSpanFlags(sc trace.SpanContext) uint32 { + flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK + if sc.IsRemote() { + flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK + } + + return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative +} + +// spanEvents transforms span Events to an OTLP span events. +func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { + if len(es) == 0 { + return nil + } + + events := make([]*tracepb.Span_Event, len(es)) + // Transform message events + for i := 0; i < len(es); i++ { + events[i] = &tracepb.Span_Event{ + Name: es[i].Name, + TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked. + Attributes: KeyValues(es[i].Attributes), + DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount), + } + } + return events +} + +// spanKind transforms a SpanKind to an OTLP span kind. 
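+
+// As a concrete reading of buildSpanFlags above (a sketch; sc is a
+// hypothetical remote SpanContext): the HAS_IS_REMOTE bit (0x100) is always
+// set, so a zero-value parent yields 0x100 while a remote parent adds 0x200:
+//
+//	local := buildSpanFlags(trace.SpanContext{}) // 0x100
+//	remote := buildSpanFlags(sc)                 // 0x300
+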
+func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindInternal: + return tracepb.Span_SPAN_KIND_INTERNAL + case trace.SpanKindClient: + return tracepb.Span_SPAN_KIND_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SPAN_KIND_SERVER + case trace.SpanKindProducer: + return tracepb.Span_SPAN_KIND_PRODUCER + case trace.SpanKindConsumer: + return tracepb.Span_SPAN_KIND_CONSUMER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md new file mode 100644 index 00000000..5309bb7c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md @@ -0,0 +1,3 @@ +# OTLP Trace gRPC Exporter + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go new file mode 100644 index 00000000..3993df92 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -0,0 +1,295 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "context" + "errors" + "sync" + "time" + + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +type client struct { + endpoint string + dialOpts []grpc.DialOption + metadata metadata.MD + exportTimeout time.Duration + requestFunc retry.RequestFunc + + // stopCtx is used as a parent context for all exports. Therefore, when it + // is canceled with the stopFunc all exports are canceled. + stopCtx context.Context + // stopFunc cancels stopCtx, stopping any active exports. + stopFunc context.CancelFunc + + // ourConn keeps track of where conn was created: true if created here on + // Start, or false if passed with an option. 
This is important on Shutdown + // as the conn should only be closed if created here on start. Otherwise, + // it is up to the processes that passed the conn to close it. + ourConn bool + conn *grpc.ClientConn + tscMu sync.RWMutex + tsc coltracepb.TraceServiceClient +} + +// Compile time check *client implements otlptrace.Client. +var _ otlptrace.Client = (*client)(nil) + +// NewClient creates a new gRPC trace client. +func NewClient(opts ...Option) otlptrace.Client { + return newClient(opts...) +} + +func newClient(opts ...Option) *client { + cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...) + + ctx, cancel := context.WithCancel(context.Background()) + + c := &client{ + endpoint: cfg.Traces.Endpoint, + exportTimeout: cfg.Traces.Timeout, + requestFunc: cfg.RetryConfig.RequestFunc(retryable), + dialOpts: cfg.DialOptions, + stopCtx: ctx, + stopFunc: cancel, + conn: cfg.GRPCConn, + } + + if len(cfg.Traces.Headers) > 0 { + c.metadata = metadata.New(cfg.Traces.Headers) + } + + return c +} + +// Start establishes a gRPC connection to the collector. +func (c *client) Start(context.Context) error { + if c.conn == nil { + // If the caller did not provide a ClientConn when the client was + // created, create one using the configuration they did provide. + conn, err := grpc.NewClient(c.endpoint, c.dialOpts...) + if err != nil { + return err + } + // Keep track that we own the lifecycle of this conn and need to close + // it on Shutdown. + c.ourConn = true + c.conn = conn + } + + // The otlptrace.Client interface states this method is called just once, + // so no need to check if already started. + c.tscMu.Lock() + c.tsc = coltracepb.NewTraceServiceClient(c.conn) + c.tscMu.Unlock() + + return nil +} + +var errAlreadyStopped = errors.New("the client is already stopped") + +// Stop shuts down the client. +// +// Any active connections to a remote endpoint are closed if they were created +// by the client. Any gRPC connection passed during creation using +// WithGRPCConn will not be closed. It is the caller's responsibility to +// handle cleanup of that resource. +// +// This method synchronizes with the UploadTraces method of the client. It +// will wait for any active calls to that method to complete unimpeded, or it +// will cancel any active calls if ctx expires. If ctx expires, the context +// error will be forwarded as the returned error. All client held resources +// will still be released in this situation. +// +// If the client has already stopped, an error will be returned describing +// this. +func (c *client) Stop(ctx context.Context) error { + // Make sure to return context error if the context is done when calling this method. + err := ctx.Err() + + // Acquire the c.tscMu lock within the ctx lifetime. + acquired := make(chan struct{}) + go func() { + c.tscMu.Lock() + close(acquired) + }() + + select { + case <-ctx.Done(): + // The Stop timeout is reached. Kill any remaining exports to force + // the clear of the lock and save the timeout error to return and + // signal the shutdown timed out before cleanly stopping. + c.stopFunc() + err = ctx.Err() + + // To ensure the client is not left in a dirty state c.tsc needs to be + // set to nil. To avoid the race condition when doing this, ensure + // that all the exports are killed (initiated by c.stopFunc). + <-acquired + case <-acquired: + } + // Hold the tscMu lock for the rest of the function to ensure no new + // exports are started. 
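+	// (The goroutine/channel hand-off above is what makes the Lock call
+	// cancellable: sync.Mutex cannot observe ctx directly, so acquisition
+	// completion is signalled on a channel and raced against ctx.Done().)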
+ defer c.tscMu.Unlock() + + // The otlptrace.Client interface states this method is called only + // once, but there is no guarantee it is called after Start. Ensure the + // client is started before doing anything and let the called know if they + // made a mistake. + if c.tsc == nil { + return errAlreadyStopped + } + + // Clear c.tsc to signal the client is stopped. + c.tsc = nil + + if c.ourConn { + closeErr := c.conn.Close() + // A context timeout error takes precedence over this error. + if err == nil && closeErr != nil { + err = closeErr + } + } + return err +} + +var errShutdown = errors.New("the client is shutdown") + +// UploadTraces sends a batch of spans. +// +// Retryable errors from the server will be handled according to any +// RetryConfig the client was created with. +func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { + // Hold a read lock to ensure a shut down initiated after this starts does + // not abandon the export. This read lock acquire has less priority than a + // write lock acquire (i.e. Stop), meaning if the client is shutting down + // this will come after the shut down. + c.tscMu.RLock() + defer c.tscMu.RUnlock() + + if c.tsc == nil { + return errShutdown + } + + ctx, cancel := c.exportContext(ctx) + defer cancel() + + return c.requestFunc(ctx, func(iCtx context.Context) error { + resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ + ResourceSpans: protoSpans, + }) + if resp != nil && resp.PartialSuccess != nil { + msg := resp.PartialSuccess.GetErrorMessage() + n := resp.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } + } + // nil is converted to OK. + if status.Code(err) == codes.OK { + // Success. + return nil + } + return err + }) +} + +// exportContext returns a copy of parent with an appropriate deadline and +// cancellation function. +// +// It is the callers responsibility to cancel the returned context once its +// use is complete, via the parent or directly with the returned CancelFunc, to +// ensure all resources are correctly released. +func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) { + var ( + ctx context.Context + cancel context.CancelFunc + ) + + if c.exportTimeout > 0 { + ctx, cancel = context.WithTimeout(parent, c.exportTimeout) + } else { + ctx, cancel = context.WithCancel(parent) + } + + if c.metadata.Len() > 0 { + ctx = metadata.NewOutgoingContext(ctx, c.metadata) + } + + // Unify the client stopCtx with the parent. + go func() { + select { + case <-ctx.Done(): + case <-c.stopCtx.Done(): + // Cancel the export as the shutdown has timed out. + cancel() + } + }() + + return ctx, cancel +} + +// retryable returns if err identifies a request that can be retried and a +// duration to wait for if an explicit throttle time is included in err. +func retryable(err error) (bool, time.Duration) { + s := status.Convert(err) + return retryableGRPCStatus(s) +} + +func retryableGRPCStatus(s *status.Status) (bool, time.Duration) { + switch s.Code() { + case codes.Canceled, + codes.DeadlineExceeded, + codes.Aborted, + codes.OutOfRange, + codes.Unavailable, + codes.DataLoss: + // Additionally handle RetryInfo. + _, d := throttleDelay(s) + return true, d + case codes.ResourceExhausted: + // Retry only if the server signals that the recovery from resource exhaustion is possible. + return throttleDelay(s) + } + + // Not a retry-able error. 
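+	// For example (a sketch): codes.Unavailable retries immediately unless
+	// the server attached a RetryInfo delay, codes.ResourceExhausted retries
+	// only when such a delay is present, and codes.InvalidArgument lands here
+	// and is not retried.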
+	return false, 0
+}
+
+// throttleDelay reports whether the status carries a RetryInfo detail and,
+// if so, the duration to wait before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+	for _, detail := range s.Details() {
+		if t, ok := detail.(*errdetails.RetryInfo); ok {
+			return true, t.RetryDelay.AsDuration()
+		}
+	}
+	return false, 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *client) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Endpoint string
+	}{
+		Type:     "otlphttpgrpc",
+		Endpoint: c.endpoint,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 00000000..b7bd429f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port, and a path.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT setting overrides
+the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION. +The configuration can be overridden by [WithCompressor], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) - +the filepath to the trusted certificate to use when verifying a server's TLS credentials. +OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) - +the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options. + +OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) - +the filepath to the client's private key to use in mTLS communication in PEM format. +OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY. +The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option. + +[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content +*/ +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go new file mode 100644 index 00000000..b826b842 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "context" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" +) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) { + return otlptrace.New(ctx, NewClient(opts...)) +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(opts ...Option) *otlptrace.Exporter { + return otlptrace.NewUnstarted(NewClient(opts...)) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go new file mode 100644 index 00000000..4abf48d1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go @@ -0,0 +1,215 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/envconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + "unicode" + + "go.opentelemetry.io/otel/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. 
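+//
+// For instance (a sketch), with Namespace "OTEL_EXPORTER_OTLP" a lookup of
+// "TRACES_ENDPOINT" reads OTEL_EXPORTER_OTLP_TRACES_ENDPOINT:
+//
+//	r := EnvOptionsReader{GetEnv: os.Getenv, ReadFile: os.ReadFile, Namespace: "OTEL_EXPORTER_OTLP"}
+//	v, ok := r.GetEnvValue("TRACES_ENDPOINT")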
+type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. +func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + +// WithDuration retrieves the specified config and passes it to ConfigFn as a duration. +func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return + } + fn(time.Duration(d) * time.Millisecond) + } + } +} + +// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. +func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(stringToHeader(v)) + } + } +} + +// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. +func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "parse url", "input", v) + return + } + fn(u) + } + } +} + +// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. +func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b, err := e.ReadFile(v) + if err != nil { + global.Error(err, "read tls ca cert file", "file", v) + return + } + c, err := createCertPool(b) + if err != nil { + global.Error(err, "create tls cert pool") + return + } + fn(c) + } + } +} + +// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
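+//
+// For example (a sketch), WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", fn)
+// invokes fn only when both OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE and
+// OTEL_EXPORTER_OTLP_CLIENT_KEY name readable PEM files that form a valid
+// key pair; any failure is reported to the global error handler and the
+// option is skipped.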
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + n, v, found := strings.Cut(header, "=") + if !found { + global.Error(errors.New("missing '="), "parse headers", "input", header) + continue + } + + trimmedName := strings.TrimSpace(n) + + // Validate the key. + if !isValidHeaderKey(trimmedName) { + global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName) + continue + } + + // Only decode the value. + value, err := url.PathUnescape(v) + if err != nil { + global.Error(err, "escape header value", "value", v) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} + +func isValidHeaderKey(key string) bool { + if key == "" { + return false + } + for _, c := range key { + if !isTokenChar(c) { + return false + } + } + return true +} + +func isTokenChar(c rune) bool { + return c <= unicode.MaxASCII && (unicode.IsLetter(c) || + unicode.IsDigit(c) || + c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' || + c == '+' || c == '-' || c == '.' 
|| c == '^' || c == '_' || c == '`' || c == '|' || c == '~') +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go new file mode 100644 index 00000000..97cd6c54 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go new file mode 100644 index 00000000..7bb189a9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go @@ -0,0 +1,142 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + "os" + "path" + "strings" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig" +) + +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", +} + +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. +func ApplyGRPCEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + return cfg +} + +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. +func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + return cfg +} + +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} + + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. + cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For endpoint URLs for OTLP/HTTP per-signal variables, the + // URL MUST be used as-is without any modification. The only + // exception is that if an URL contains no path part, the root + // path / MUST be used. 
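+				// For example (a sketch):
+				// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://collector:4318
+				// yields Endpoint "collector:4318" with URLPath "/" over an
+				// insecure connection, whereas the generic ENDPOINT variable
+				// above would join its path with DefaultTracesPath instead.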
+ path := u.Path + if path == "" { + path = "/" + } + cfg.Traces.URLPath = path + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + ) + + return opts +} + +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { + case "http", "unix": + return WithInsecure() + default: + return WithSecure() + } +} + +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg + } +} + +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. +func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } + } +} + +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() + } + return WithSecure() +} + +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) + } + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go new file mode 100644 index 00000000..8d4b4bf0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -0,0 +1,353 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + "path" + "strings" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/encoding/gzip" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" + "go.opentelemetry.io/otel/internal/global" +) + +const ( + // DefaultTracesPath is a default URL path for endpoint that + // receives spans. + DefaultTracesPath string = "/v1/traces" + // DefaultTimeout is a default max waiting time for the backend to process + // each span batch. + DefaultTimeout time.Duration = 10 * time.Second +) + +type ( + // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request. + // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client. + HTTPTransportProxyFunc func(*http.Request) (*url.URL, error) + + SignalConfig struct { + Endpoint string + Insecure bool + TLSCfg *tls.Config + Headers map[string]string + Compression Compression + Timeout time.Duration + URLPath string + + // gRPC configurations + GRPCCredentials credentials.TransportCredentials + + Proxy HTTPTransportProxyFunc + } + + Config struct { + // Signal specific configurations + Traces SignalConfig + + RetryConfig retry.Config + + // gRPC configurations + ReconnectionPeriod time.Duration + ServiceConfig string + DialOptions []grpc.DialOption + GRPCConn *grpc.ClientConn + } +) + +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. +func NewHTTPConfig(opts ...HTTPOption) Config { + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + } + cfg = ApplyHTTPEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath) + return cfg +} + +// cleanPath returns a path with all spaces trimmed and all redundancies +// removed. If urlPath is empty or cleaning it results in an empty string, +// defaultPath is returned instead. +func cleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." { + return defaultPath + } + if !path.IsAbs(tmp) { + tmp = fmt.Sprintf("/%s", tmp) + } + return tmp +} + +// NewGRPCConfig returns a new Config with all settings applied from opts and +// any unset setting using the default gRPC config values. 
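+//
+// For example (a sketch), NewGRPCConfig(WithEndpoint("collector:4317"))
+// dials collector:4317 using the host's root CAs, since no credentials,
+// insecure flag, or TLS config were supplied; explicit GRPCCredentials
+// always take precedence over the Insecure flag.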
+func NewGRPCConfig(opts ...GRPCOption) Config { + userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version() + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)}, + } + cfg = ApplyGRPCEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyGRPCOption(cfg) + } + + if cfg.ServiceConfig != "" { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig)) + } + // Prioritize GRPCCredentials over Insecure (passing both is an error). + if cfg.Traces.GRPCCredentials != nil { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials)) + } else if cfg.Traces.Insecure { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + // Default to using the host's root CA. + creds := credentials.NewTLS(nil) + cfg.Traces.GRPCCredentials = creds + cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds)) + } + if cfg.Traces.Compression == GzipCompression { + cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name))) + } + if cfg.ReconnectionPeriod != 0 { + p := grpc.ConnectParams{ + Backoff: backoff.DefaultConfig, + MinConnectTimeout: cfg.ReconnectionPeriod, + } + cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p)) + } + + return cfg +} + +type ( + // GenericOption applies an option to the HTTP or gRPC driver. + GenericOption interface { + ApplyHTTPOption(Config) Config + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // HTTPOption applies an option to the HTTP driver. + HTTPOption interface { + ApplyHTTPOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } + + // GRPCOption applies an option to the gRPC driver. + GRPCOption interface { + ApplyGRPCOption(Config) Config + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() + } +) + +// genericOption is an option that applies the same logic +// for both gRPC and HTTP. +type genericOption struct { + fn func(Config) Config +} + +func (g *genericOption) ApplyGRPCOption(cfg Config) Config { + return g.fn(cfg) +} + +func (g *genericOption) ApplyHTTPOption(cfg Config) Config { + return g.fn(cfg) +} + +func (genericOption) private() {} + +func newGenericOption(fn func(cfg Config) Config) GenericOption { + return &genericOption{fn: fn} +} + +// splitOption is an option that applies different logics +// for gRPC and HTTP. 
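+//
+// WithTLSClientConfig below is the canonical example (a sketch of its use):
+//
+//	opt := WithTLSClientConfig(&tls.Config{MinVersion: tls.VersionTLS12})
+//	httpCfg = opt.ApplyHTTPOption(httpCfg) // stores a cloned Traces.TLSCfg
+//	grpcCfg = opt.ApplyGRPCOption(grpcCfg) // sets Traces.GRPCCredentials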
+type splitOption struct { + httpFn func(Config) Config + grpcFn func(Config) Config +} + +func (g *splitOption) ApplyGRPCOption(cfg Config) Config { + return g.grpcFn(cfg) +} + +func (g *splitOption) ApplyHTTPOption(cfg Config) Config { + return g.httpFn(cfg) +} + +func (splitOption) private() {} + +func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption { + return &splitOption{httpFn: httpFn, grpcFn: grpcFn} +} + +// httpOption is an option that is only applied to the HTTP driver. +type httpOption struct { + fn func(Config) Config +} + +func (h *httpOption) ApplyHTTPOption(cfg Config) Config { + return h.fn(cfg) +} + +func (httpOption) private() {} + +func NewHTTPOption(fn func(cfg Config) Config) HTTPOption { + return &httpOption{fn: fn} +} + +// grpcOption is an option that is only applied to the gRPC driver. +type grpcOption struct { + fn func(Config) Config +} + +func (h *grpcOption) ApplyGRPCOption(cfg Config) Config { + return h.fn(cfg) +} + +func (grpcOption) private() {} + +func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { + return &grpcOption{fn: fn} +} + +// Generic Options + +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. +func WithEndpoint(endpoint string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Endpoint = endpoint + return cfg + }) +} + +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". +func WithEndpointURL(v string) GenericOption { + return newGenericOption(func(cfg Config) Config { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "otlptrace: parse endpoint url", "url", v) + return cfg + } + + cfg.Traces.Endpoint = u.Host + cfg.Traces.URLPath = u.Path + if u.Scheme != "https" { + cfg.Traces.Insecure = true + } + + return cfg + }) +} + +func WithCompression(compression Compression) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Compression = compression + return cfg + }) +} + +func WithURLPath(urlPath string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.URLPath = urlPath + return cfg + }) +} + +func WithRetry(rc retry.Config) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.RetryConfig = rc + return cfg + }) +} + +func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption { + return newSplitOption(func(cfg Config) Config { + cfg.Traces.TLSCfg = tlsCfg.Clone() + return cfg + }, func(cfg Config) Config { + cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg) + return cfg + }) +} + +func WithInsecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = true + return cfg + }) +} + +func WithSecure() GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Insecure = false + return cfg + }) +} + +func WithHeaders(headers map[string]string) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Headers = headers + return cfg + }) +} + +func WithTimeout(duration time.Duration) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Timeout = duration + return cfg + }) +} + +func WithProxy(pf HTTPTransportProxyFunc) GenericOption { + return newGenericOption(func(cfg Config) Config { + cfg.Traces.Proxy = pf + return cfg + }) +} diff --git 
a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
new file mode 100644
index 00000000..3d4f699d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,40 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+const (
+    // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+    DefaultCollectorGRPCPort uint16 = 4317
+    // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+    DefaultCollectorHTTPPort uint16 = 4318
+    // DefaultCollectorHost is the host address the Exporter will attempt
+    // to connect to if no collector address is provided.
+    DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+    // NoCompression tells the driver to send payloads without
+    // compression.
+    NoCompression Compression = iota
+    // GzipCompression tells the driver to send payloads after
+    // compressing them with gzip.
+    GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+    // MarshalProto tells the driver to send using the protobuf binary format.
+    MarshalProto Marshaler = iota
+    // MarshalJSON tells the driver to send using the JSON format.
+    MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
new file mode 100644
index 00000000..38b97a01
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -0,0 +1,26 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+    "crypto/tls"
+    "crypto/x509"
+    "errors"
+)
+
+// CreateTLSConfig creates a tls.Config from raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+    cp := x509.NewCertPool()
+    if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+        return nil, errors.New("failed to append certificate to the cert pool")
+    }
+
+    return &tls.Config{
+        RootCAs: cp,
+    }, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
new file mode 100644
index 00000000..a12ea4c4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+    ErrorMessage  string
+    RejectedItems int64
+    RejectedKind  string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+    msg := ps.ErrorMessage
+    if msg == "" {
+        msg = "empty message"
+    }
+    return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+    _, ok := err.(PartialSuccess)
+    return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+    return PartialSuccess{
+        ErrorMessage:  errorMessage,
+        RejectedItems: itemsRejected,
+        RejectedKind:  "spans",
+    }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+    return PartialSuccess{
+        ErrorMessage:  errorMessage,
+        RejectedItems: itemsRejected,
+        RejectedKind:  "metric data points",
+    }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
new file mode 100644
index 00000000..1c5450ab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+
+import (
+    "context"
+    "fmt"
+    "time"
+
+    "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig is the recommended default configuration to use.
+var DefaultConfig = Config{
+    Enabled:         true,
+    InitialInterval: 5 * time.Second,
+    MaxInterval:     30 * time.Second,
+    MaxElapsedTime:  time.Minute,
+}
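Exporter users never import this internal package directly; the policy is configured through the exported RetryConfig alias and WithRetry option that appear later in this patch. A minimal sketch of overriding the defaults above (interval values are illustrative):

package main

import (
    "context"
    "time"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
    // A more patient policy than the defaults: first retry after 10s,
    // back off up to 1m between attempts, give up after 5m total.
    exp, err := otlptracegrpc.New(context.Background(),
        otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{
            Enabled:         true,
            InitialInterval: 10 * time.Second,
            MaxInterval:     time.Minute,
            MaxElapsedTime:  5 * time.Minute,
        }),
    )
    if err != nil {
        panic(err)
    }
    defer func() { _ = exp.Shutdown(context.Background()) }()
}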
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+    // Enabled indicates whether to retry sending batches in case of
+    // export failure.
+    Enabled bool
+    // InitialInterval is the time to wait after the first failure before
+    // retrying.
+    InitialInterval time.Duration
+    // MaxInterval is the upper bound on backoff interval. Once this value is
+    // reached the delay between consecutive retries will always be
+    // `MaxInterval`.
+    MaxInterval time.Duration
+    // MaxElapsedTime is the maximum amount of time (including retries) spent
+    // trying to send a request/batch. Once this value is reached, the data
+    // is discarded.
+    MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retryable and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true as its first return value if the error
+// argument is retryable, otherwise it must return false.
+//
+// The function must return a non-zero time.Duration if the error contains an
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc that uses the evaluate function to
+// determine if requests can be retried, based on the exponential backoff
+// configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+    if !c.Enabled {
+        return func(ctx context.Context, fn func(context.Context) error) error {
+            return fn(ctx)
+        }
+    }
+
+    return func(ctx context.Context, fn func(context.Context) error) error {
+        // Do not use NewExponentialBackOff since it calls Reset and the code here
+        // must call Reset after changing the InitialInterval (this saves an
+        // unnecessary call to Now).
+        b := &backoff.ExponentialBackOff{
+            InitialInterval:     c.InitialInterval,
+            RandomizationFactor: backoff.DefaultRandomizationFactor,
+            Multiplier:          backoff.DefaultMultiplier,
+            MaxInterval:         c.MaxInterval,
+            MaxElapsedTime:      c.MaxElapsedTime,
+            Stop:                backoff.Stop,
+            Clock:               backoff.SystemClock,
+        }
+        b.Reset()
+
+        for {
+            err := fn(ctx)
+            if err == nil {
+                return nil
+            }
+
+            retryable, throttle := evaluate(err)
+            if !retryable {
+                return err
+            }
+
+            bOff := b.NextBackOff()
+            if bOff == backoff.Stop {
+                return fmt.Errorf("max retry time elapsed: %w", err)
+            }
+
+            // Wait for the greater of the backoff or throttle delay.
+            var delay time.Duration
+            if bOff > throttle {
+                delay = bOff
+            } else {
+                elapsed := b.GetElapsedTime()
+                if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+                    return fmt.Errorf("max retry time would elapse: %w", err)
+                }
+                delay = throttle
+            }
+
+            if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+                return fmt.Errorf("%w: %w", ctxErr, err)
+            }
+        }
+    }
+}
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+    timer := time.NewTimer(delay)
+    defer timer.Stop()
+
+    select {
+    case <-ctx.Done():
+        // Handle the case where the timer and context deadline end
+        // simultaneously by prioritizing the timer expiration nil value
+        // response.
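+        // The inner select below is non-blocking: if the timer fired at the
+        // same moment the context was canceled, fall through to the nil
+        // return so the caller may retry; otherwise report the context error.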
+ select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go new file mode 100644 index 00000000..00ab1f20 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -0,0 +1,210 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry" +) + +// Option applies an option to the gRPC driver. +type Option interface { + applyGRPCOption(otlpconfig.Config) otlpconfig.Config +} + +func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { + converted := make([]otlpconfig.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying export of span batches that +// failed to be received by the target endpoint. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +type wrappedOption struct { + otlpconfig.GRPCOption +} + +func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config { + return w.ApplyGRPCOption(cfg) +} + +// WithInsecure disables client transport security for the exporter's gRPC +// connection just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by +// default, client security is required unless WithInsecure is used. +// +// This option has no effect if WithGRPCConn is used. +func WithInsecure() Option { + return wrappedOption{otlpconfig.WithInsecure()} +} + +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. The provided endpoint should resemble "example.com:4317" (no +// scheme or path). +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both environment variables are set, +// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment +// variable is set, and this option is passed, this option will take precedence. +// +// If both this option and WithEndpointURL are used, the last used option will +// take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4317" will be used. +// +// This option has no effect if WithGRPCConn is used. +func WithEndpoint(endpoint string) Option { + return wrappedOption{otlpconfig.WithEndpoint(endpoint)} +} + +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) +// the Exporter will connect to. The provided endpoint URL should resemble +// "https://example.com:4318/v1/traces". +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. 
If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "https://localhost:4317/v1/traces" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+    return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+    return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+        cfg.ReconnectionPeriod = rp
+        return cfg
+    })}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+    if compressor == "gzip" {
+        return otlpconfig.GzipCompression
+    }
+
+    otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+    return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. Supported compressor values: "gzip".
+func WithCompressor(compressor string) Option {
+    return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+    return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes in grpc.TransportCredentials instead of,
+// say, a certificate file or a tls.Certificate, because these credentials
+// can be retrieved in many ways (e.g. from a plain file, from an in-code
+// tls.Config, or by certificate rotation), so it is up to the caller to
+// decide what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+    return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+        cfg.Traces.GRPCCredentials = creds
+        return cfg
+    })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+    return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+        cfg.ServiceConfig = serviceConfig
+        return cfg
+    })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+    return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+        cfg.DialOptions = opts
+        return cfg
+    })}
+}
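Taken together, these options compose at exporter construction time. A minimal sketch of typical usage (the endpoint and header values are illustrative placeholders):

package main

import (
    "context"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
)

func main() {
    ctx := context.Background()
    exp, err := otlptracegrpc.New(ctx,
        otlptracegrpc.WithEndpoint("collector.example.com:4317"),
        otlptracegrpc.WithHeaders(map[string]string{"x-tenant": "demo"}),
        otlptracegrpc.WithCompressor("gzip"),
    )
    if err != nil {
        panic(err)
    }
    defer func() { _ = exp.Shutdown(ctx) }()
    _ = exp // typically handed to an SDK TracerProvider's batch span processor
}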
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The client
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+    return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+        cfg.GRPCConn = conn
+        return cfg
+    })}
+}
+
+// WithTimeout sets the max amount of time a client will attempt to export a
+// batch of spans. This takes precedence over any retry settings defined with
+// WithRetry; once this time limit has been reached the export is abandoned
+// and the batch of spans is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+    return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of spans.
+//
+// If the target endpoint responds with not only a retryable error, but also
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+    return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
new file mode 100644
index 00000000..709c8f70
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
+func Version() string {
+    return "1.31.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index cfd1df9b..e3db438a 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -5,8 +5,8 @@ package global // import "go.opentelemetry.io/otel/internal/global"
 import (
     "container/list"
+    "reflect"
     "sync"
-    "sync/atomic"
     "go.opentelemetry.io/otel/metric"
     "go.opentelemetry.io/otel/metric/embedded"
@@ -76,7 +76,7 @@ func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Me
     return val
 }
-    t := &meter{name: name, opts: opts}
+    t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
     p.meters[key] = t
     return t
 }
@@ -92,17 +92,29 @@ type meter struct {
     opts []metric.MeterOption
     mtx sync.Mutex
-    instruments []delegatedInstrument
+    instruments map[instID]delegatedInstrument
     registry list.List
-    delegate atomic.Value // metric.Meter
+    delegate metric.Meter
 }
 type delegatedInstrument interface {
     setDelegate(metric.Meter)
 }
+// instID holds the identifying properties of an instrument.
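+// Instruments requested before a delegate is installed are cached in the
+// meter's instruments map keyed by instID, so repeated requests for an
+// identical instrument return the same placeholder instance.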
+type instID struct { + // name is the name of the stream. + name string + // description is the description of the stream. + description string + // kind defines the functional group of the instrument. + kind reflect.Type + // unit is the unit of the stream. + unit string +} + // setDelegate configures m to delegate all Meter functionality to Meters // created by provider. // @@ -110,12 +122,12 @@ type delegatedInstrument interface { // // It is guaranteed by the caller that this happens only once. func (m *meter) setDelegate(provider metric.MeterProvider) { - meter := provider.Meter(m.name, m.opts...) - m.delegate.Store(meter) - m.mtx.Lock() defer m.mtx.Unlock() + meter := provider.Meter(m.name, m.opts...) + m.delegate = meter + for _, inst := range m.instruments { inst.setDelegate(meter) } @@ -133,169 +145,337 @@ func (m *meter) setDelegate(provider metric.MeterProvider) { } func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Counter(name, options...) + } + + cfg := metric.NewInt64CounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } i := &siCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64UpDownCounter(name, options...) + } + + cfg := metric.NewInt64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } i := &siUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Histogram(name, options...) + } + + cfg := metric.NewInt64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*siHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } i := &siHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64Gauge(name, options...) + } + + cfg := metric.NewInt64GaugeConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*siGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } i := &siGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableCounter(name, options...) + } + + cfg := metric.NewInt64ObservableCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } i := &aiCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } i := &aiUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Int64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Int64ObservableGauge(name, options...) + } + + cfg := metric.NewInt64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*aiGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } i := &aiGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Counter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Counter(name, options...) + } + + cfg := metric.NewFloat64CounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*sfCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } i := &sfCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64UpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64UpDownCounter(name, options...) + } + + cfg := metric.NewFloat64UpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } i := &sfUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Histogram(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Histogram(name, options...) + } + + cfg := metric.NewFloat64HistogramConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfHistogram)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } i := &sfHistogram{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64Gauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64Gauge(name, options...) + } + + cfg := metric.NewFloat64GaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*sfGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } i := &sfGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableCounterConfig(options...) 
+ id := instID{ + name: name, + kind: reflect.TypeOf((*afCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } i := &afCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableUpDownCounter(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableUpDownCounter(name, options...) + } + + cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afUpDownCounter)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } i := &afUpDownCounter{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - return del.Float64ObservableGauge(name, options...) - } m.mtx.Lock() defer m.mtx.Unlock() + + if m.delegate != nil { + return m.delegate.Float64ObservableGauge(name, options...) + } + + cfg := metric.NewFloat64ObservableGaugeConfig(options...) + id := instID{ + name: name, + kind: reflect.TypeOf((*afGauge)(nil)), + description: cfg.Description(), + unit: cfg.Unit(), + } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } i := &afGauge{name: name, opts: options} - m.instruments = append(m.instruments, i) + m.instruments[id] = i return i, nil } // RegisterCallback captures the function that will be called during Collect. func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) { - if del, ok := m.delegate.Load().(metric.Meter); ok { - insts = unwrapInstruments(insts) - return del.RegisterCallback(f, insts...) - } - m.mtx.Lock() defer m.mtx.Unlock() + if m.delegate != nil { + insts = unwrapInstruments(insts) + return m.delegate.RegisterCallback(f, insts...) + } + reg := ®istration{instruments: insts, function: f} e := m.registry.PushBack(reg) reg.unreg = func() error { @@ -349,6 +529,7 @@ func (c *registration) setDelegate(m metric.Meter) { reg, err := m.RegisterCallback(c.function, insts...) if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b3..b2fe3e41 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,11 +20,13 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). 
+ return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +38,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go index cf23db77..f8435d8f 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go @@ -213,7 +213,7 @@ type Float64Observer interface { } // Float64Callback is a function registered with a Meter that makes -// observations for a Float64Observerable instrument it is registered with. +// observations for a Float64Observable instrument it is registered with. // Calls to the Float64Observer record measurement values for the // Float64Observable. // diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go index c82ba532..e079aaef 100644 --- a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go +++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go @@ -212,7 +212,7 @@ type Int64Observer interface { } // Int64Callback is a function registered with a Meter that makes observations -// for an Int64Observerable instrument it is registered with. Calls to the +// for an Int64Observable instrument it is registered with. Calls to the // Int64Observer record measurement values for the Int64Observable. // // The function needs to complete in a finite amount of time and the deadline diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e402..a535782e 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 6a7991e0..14e08c24 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -52,6 +52,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational @@ -61,6 +62,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a @@ -70,6 +72,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. @@ -78,6 +81,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -92,6 +96,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -106,6 +111,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -130,6 +136,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational @@ -139,6 +146,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a @@ -148,6 +156,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. @@ -156,6 +165,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -170,6 +180,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -184,6 +195,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -242,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md new file mode 100644 index 00000000..bb896943 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md @@ -0,0 +1,3 @@ +# Metric Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop) diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go new file mode 100644 index 00000000..ca6fcbdc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go @@ -0,0 +1,281 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry metric API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry metric API will +// effectively disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry metric API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/metric/noop" + +import ( + "context" + + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. 
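+    // (Each `var _ T = impl{}` line below fails to compile if impl stops
+    // satisfying T, catching interface drift early.)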
+
+    _ metric.MeterProvider = MeterProvider{}
+    _ metric.Meter = Meter{}
+    _ metric.Observer = Observer{}
+    _ metric.Registration = Registration{}
+    _ metric.Int64Counter = Int64Counter{}
+    _ metric.Float64Counter = Float64Counter{}
+    _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+    _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+    _ metric.Int64Histogram = Int64Histogram{}
+    _ metric.Float64Histogram = Float64Histogram{}
+    _ metric.Int64Gauge = Int64Gauge{}
+    _ metric.Float64Gauge = Float64Gauge{}
+    _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+    _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+    _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+    _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+    _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+    _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+    _ metric.Int64Observer = Int64Observer{}
+    _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+    return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+    return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+    return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+    return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+    return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+    return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(string, ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) {
+    return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(string, ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) {
+    return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+    return Int64ObservableGauge{}, nil
+}
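A minimal sketch of the package's typical use, installing the no-op provider to disable the global metric API (instrument names are illustrative):

package main

import (
    "context"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/metric/noop"
)

func main() {
    // Installing the no-op provider effectively disables metrics: every
    // meter and instrument obtained from the global API records nothing.
    otel.SetMeterProvider(noop.NewMeterProvider())

    meter := otel.Meter("example")
    counter, _ := meter.Int64Counter("requests")
    counter.Add(context.Background(), 1) // no telemetry is produced
}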
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+    return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+    return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+    return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+    return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(string, ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) {
+    return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(string, ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) {
+    return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(string, ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) {
+    return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+    return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This will always return nil because the No-Op Meter performs no
+// operation, including holding any record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter } + +// Add performs no operation. +func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {} + +// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record +// float64 measurements. It produces no telemetry. +type Float64UpDownCounter struct{ embedded.Float64UpDownCounter } + +// Add performs no operation. +func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {} + +// Int64Histogram is an OpenTelemetry Histogram used to record int64 +// measurements. It produces no telemetry. +type Int64Histogram struct{ embedded.Int64Histogram } + +// Record performs no operation. +func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Histogram is an OpenTelemetry Histogram used to record float64 +// measurements. It produces no telemetry. +type Float64Histogram struct{ embedded.Float64Histogram } + +// Record performs no operation. +func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64 +// measurements. It produces no telemetry. +type Int64Gauge struct{ embedded.Int64Gauge } + +// Record performs no operation. +func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {} + +// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64 +// measurements. It produces no telemetry. +type Float64Gauge struct{ embedded.Float64Gauge } + +// Record performs no operation. +func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {} + +// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableCounter struct { + metric.Int64Observable + embedded.Int64ObservableCounter +} + +// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableCounter struct { + metric.Float64Observable + embedded.Float64ObservableCounter +} + +// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// int64 measurements. It produces no telemetry. +type Int64ObservableGauge struct { + metric.Int64Observable + embedded.Int64ObservableGauge +} + +// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record +// float64 measurements. It produces no telemetry. +type Float64ObservableGauge struct { + metric.Float64Observable + embedded.Float64ObservableGauge +} + +// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record int64 measurements. It produces no telemetry. +type Int64ObservableUpDownCounter struct { + metric.Int64Observable + embedded.Int64ObservableUpDownCounter +} + +// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter +// used to record float64 measurements. It produces no telemetry. +type Float64ObservableUpDownCounter struct { + metric.Float64Observable + embedded.Float64ObservableUpDownCounter +} + +// Int64Observer is a recorder of int64 measurements that performs no operation. +type Int64Observer struct{ embedded.Int64Observer } + +// Observe performs no operation. +func (Int64Observer) Observe(int64, ...metric.ObserveOption) {} + +// Float64Observer is a recorder of float64 measurements that performs no +// operation. +type Float64Observer struct{ embedded.Float64Observer } + +// Observe performs no operation. 
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {} diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 8c5ac55c..0a29a2f1 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -19,6 +19,14 @@ "matchManagers": ["gomod"], "matchDepTypes": ["indirect"], "enabled": false + }, + { + "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], + "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md new file mode 100644 index 00000000..f81b1576 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/README.md @@ -0,0 +1,3 @@ +# SDK + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md new file mode 100644 index 00000000..06e6d868 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md @@ -0,0 +1,3 @@ +# SDK Instrumentation + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation) diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 00000000..a4faa6a0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,13 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 00000000..f2cdf3c6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +// +// Deprecated: use [Scope] instead. 
+type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 00000000..72811504 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. + SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go new file mode 100644 index 00000000..07923ed8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -0,0 +1,166 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package env // import "go.opentelemetry.io/otel/sdk/internal/env" + +import ( + "os" + "strconv" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names. +const ( + // BatchSpanProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (e.g. 5000). + BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" + // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to + // export data (e.g. 3000). + BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" + // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (e.g. 2048). + BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" + // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (e.g. + // 512). Note: it must be less than or equal to + // BatchSpanProcessorMaxQueueSize. + BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + + // AttributeValueLengthKey is the maximum allowed attribute value size. + AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // AttributeCountKey is the maximum allowed span attribute count. + AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" + + // SpanAttributeValueLengthKey is the maximum allowed attribute value size + // for a span. + SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // SpanAttributeCountKey is the maximum allowed span attribute count for a + // span. + SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" + + // SpanEventCountKey is the maximum allowed span event count. + SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" + + // SpanEventAttributeCountKey is the maximum allowed attribute count per + // span event. + SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" + + // SpanLinkCountKey is the maximum allowed span link count. + SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" + + // SpanLinkAttributeCountKey is the maximum allowed attribute count per + // span link. + SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" +) + +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned.
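+// For example (illustrative), firstInt(128, SpanAttributeCountKey, AttributeCountKey) +// returns the value of OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT when it is set to a +// valid integer, falls back to OTEL_ATTRIBUTE_COUNT_LIMIT otherwise, and +// returns 128 when neither is set.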
+func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value := os.Getenv(key) + if value == "" { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + +// IntEnvOr returns the int value of the environment variable with name key if +// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned. +func IntEnvOr(key string, defaultValue int) int { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue +} + +// BatchSpanProcessorScheduleDelay returns the environment variable value for +// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorScheduleDelay(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) +} + +// BatchSpanProcessorExportTimeout returns the environment variable value for +// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorExportTimeout(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) +} + +// BatchSpanProcessorMaxQueueSize returns the environment variable value for +// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorMaxQueueSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) +} + +// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for +// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue +// is returned. +func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) +} + +// SpanAttributeValueLength returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is +// returned or defaultValue if that is not set. +func SpanAttributeValueLength(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) +} + +// SpanAttributeCount returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or +// defaultValue if that is not set. +func SpanAttributeCount(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) +} + +// SpanEventCount returns the environment variable value for the +// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanEventCount(defaultValue int) int { + return IntEnvOr(SpanEventCountKey, defaultValue) +} + +// SpanEventAttributeCount returns the environment variable value for the +// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue +// is returned. 
+func SpanEventAttributeCount(defaultValue int) int { + return IntEnvOr(SpanEventAttributeCountKey, defaultValue) +} + +// SpanLinkCount returns the environment variable value for the +// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkCount(defaultValue int) int { + return IntEnvOr(SpanLinkCountKey, defaultValue) +} + +// SpanLinkAttributeCount returns the environment variable value for the +// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkAttributeCount(defaultValue int) int { + return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md new file mode 100644 index 00000000..fab61647 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md @@ -0,0 +1,46 @@ +# Experimental Features + +The SDK contains features that have not yet stabilized in the OpenTelemetry specification. +These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback. + +These features may change in backwards incompatible ways as feedback is applied. +See the [Compatibility and Stability](#compatibility-and-stability) section for more information. + +## Features + +- [Resource](#resource) + +### Resource + +[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental. +To have experimental semantic conventions be added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable. +The value set must be the case-insensitive string of `"true"` to enable the feature. +All other values are ignored. + + + +[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/ +[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector + +#### Examples + +Enable experimental resource semantic conventions. + +```console +export OTEL_GO_X_RESOURCE=true +``` + +Disable experimental resource semantic conventions. + +```console +unset OTEL_GO_X_RESOURCE +``` + +## Compatibility and Stability + +Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md). +These features may be removed or modified in successive version releases, including patch versions. + +When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release. +There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version. +If they are supported, they may be accompanied by a deprecation notice stating a timeline for the removal of that support.
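+ +A minimal sketch (editorial addition; `x` is the SDK-internal package defined below) of how the SDK gates optional behavior on this flag: + +```go +if x.Resource.Enabled() { + // include experimental resource semantic conventions +} +``` diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go new file mode 100644 index 00000000..68d296cb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package x contains support for OTel SDK experimental features. +// +// This package should only be used for features defined in the specification. +// It should not be used for experiments or new project ideas.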
+package x // import "go.opentelemetry.io/otel/sdk/internal/x" + +import ( + "os" + "strings" +) + +// Resource is an experimental feature flag that determines whether resource +// detectors should include experimental semantic conventions. +// +// To enable this feature set the OTEL_GO_X_RESOURCE environment variable +// to the case-insensitive string value of "true" (i.e. "True" and "TRUE" +// will also enable this). +var Resource = newFeature("RESOURCE", func(v string) (string, bool) { + if strings.ToLower(v) == "true" { + return v, true + } + return "", false +}) + +// Feature is an experimental feature control flag. It provides a uniform way +// to interact with these feature flags and parse their values. +type Feature[T any] struct { + key string + parse func(v string) (T, bool) +} + +func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { + const envKeyRoot = "OTEL_GO_X_" + return Feature[T]{ + key: envKeyRoot + suffix, + parse: parse, + } +} + +// Key returns the environment variable key that needs to be set to enable the +// feature. +func (f Feature[T]) Key() string { return f.key } + +// Lookup returns the user configured value for the feature and true if the +// user has enabled the feature. Otherwise, if the feature is not enabled, a +// zero-value and false are returned. +func (f Feature[T]) Lookup() (v T, ok bool) { + // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value + // + // > The SDK MUST interpret an empty value of an environment variable the + // > same way as when the variable is unset. + vRaw := os.Getenv(f.key) + if vRaw == "" { + return v, ok + } + return f.parse(vRaw) +} + +// Enabled reports whether the feature is enabled. +func (f Feature[T]) Enabled() bool { + _, ok := f.Lookup() + return ok +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md new file mode 100644 index 00000000..4ad864d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md @@ -0,0 +1,3 @@ +# SDK Resource + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 00000000..95a61d61 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "strings" +) + +// ErrPartialResource is returned by a detector when complete source +// information for a Resource is unavailable or the source information +// contains invalid values that are omitted from the returned Resource. +var ErrPartialResource = errors.New("partial resource")
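+ +// Editorial sketch (hypothetical names, not part of the upstream file): a +// custom Detector can contribute attributes alongside the builtin detectors. +// +//	type regionDetector struct{} +// +//	func (regionDetector) Detect(ctx context.Context) (*resource.Resource, error) { +//		return resource.NewSchemaless(attribute.String("cloud.region", os.Getenv("REGION"))), nil +//	} +// +//	res, err := resource.Detect(ctx, regionDetector{}) + +// Detector detects OpenTelemetry resource information. +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information.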
+ // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. + Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect returns a new [Resource] merged from all the Resources each of the +// detectors produces. Each of the detectors is called sequentially, in the +// order they are passed, merging the produced resource into the previous. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if that error is returned from a detector. It may also +// return a merge-conflicting Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from different detectors results +// in a schema URL conflict. It is up to the caller to determine if this +// returned Resource should be used or not. +// +// If one of the detectors returns an error that is not [ErrPartialResource], +// the resource produced by the detector will not be merged and the returned +// error will wrap that detector's error. +func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + r := new(Resource) + return r, detect(ctx, r, detectors) +} + +// detect runs all detectors using ctx and merges the result into res. This +// assumes res is allocated and not nil; it will panic otherwise. +// +// If the detectors or the merging of resources produces any errors (i.e. +// [ErrPartialResource], [ErrSchemaURLConflict]), a single error wrapping all of +// these errors will be returned. Otherwise, nil is returned. +func detect(ctx context.Context, res *Resource, detectors []Detector) error { + var ( + r *Resource + errs detectErrs + err error + ) + + for _, detector := range detectors { + if detector == nil { + continue + } + r, err = detector.Detect(ctx) + if err != nil { + errs = append(errs, err) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + r, err = Merge(res, r) + if err != nil { + errs = append(errs, err) + } + *res = *r + } + + if len(errs) == 0 { + return nil + } + if errors.Is(errs, ErrSchemaURLConflict) { + // If there has been a merge conflict, ensure the resource has no + // schema URL.
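+ // Only the conflicting schema URL is cleared below; the attributes + // already merged into res are preserved.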
+ res.schemaURL = "" + } + return errs +} + +type detectErrs []error + +func (e detectErrs) Error() string { + errStr := make([]string, len(e)) + for i, err := range e { + errStr[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred detecting resource:\n\t%s" + return fmt.Sprintf(format, len(e), strings.Join(errStr, "\n\t")) +} + +func (e detectErrs) Unwrap() error { + switch len(e) { + case 0: + return nil + case 1: + return e[0] + } + return e[1:] +} + +func (e detectErrs) Is(target error) bool { + return len(e) != 0 && errors.Is(e[0], target) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 00000000..6ac1cdbf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,118 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "github.com/google/uuid" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. This Detector is included as a builtin. If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. + host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} + + defaultServiceInstanceIDDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} + _ Detector = defaultServiceInstanceIDDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKName("opentelemetry"), + semconv.TelemetrySDKLanguageGo, + semconv.TelemetrySDKVersion(sdk.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. +func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. 
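+// For example, the builtin host detector above delegates to +// StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname), so its +// Detect yields a single host.name attribute.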
+func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector. +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} + +// Detect implements Detector. +func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceInstanceIDKey, + func() (string, error) { + version4Uuid, err := uuid.NewRandom() + if err != nil { + return "", err + } + + return version4Uuid.String(), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 00000000..0d6e213d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,195 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. +type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. +func WithHost() Option { + return WithDetectors(host{}) +} + +// WithHostID adds host ID information to the configured resource. +func WithHostID() Option { + return WithDetectors(hostIDDetector{}) +} + +// WithTelemetrySDK adds TelemetrySDK version info to the configured resource. +func WithTelemetrySDK() Option { + return WithDetectors(telemetrySDK{}) +} + +// WithSchemaURL sets the schema URL for the configured resource. 
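+// For example (illustrative), resource.New(ctx, +// resource.WithSchemaURL(semconv.SchemaURL), resource.WithHost()) associates +// the semantic-convention schema URL with the detected host attributes.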
+func WithSchemaURL(schemaURL string) Option { + return schemaURLOption(schemaURL) +} + +type schemaURLOption string + +func (o schemaURLOption) apply(cfg config) config { + cfg.schemaURL = string(o) + return cfg +} + +// WithOS adds all the OS attributes to the configured Resource. +// See individual WithOS* functions to configure specific attributes. +func WithOS() Option { + return WithDetectors( + osTypeDetector{}, + osDescriptionDetector{}, + ) +} + +// WithOSType adds an attribute with the operating system type to the configured Resource. +func WithOSType() Option { + return WithDetectors(osTypeDetector{}) +} + +// WithOSDescription adds an attribute with the operating system description to the +// configured Resource. The formatted string is equivalent to the output of the +// `uname -snrvm` command. +func WithOSDescription() Option { + return WithDetectors(osDescriptionDetector{}) +} + +// WithProcess adds all the Process attributes to the configured Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. +func WithProcess() Option { + return WithDetectors( + processPIDDetector{}, + processExecutableNameDetector{}, + processExecutablePathDetector{}, + processCommandArgsDetector{}, + processOwnerDetector{}, + processRuntimeNameDetector{}, + processRuntimeVersionDetector{}, + processRuntimeDescriptionDetector{}, + ) +} + +// WithProcessPID adds an attribute with the process identifier (PID) to the +// configured Resource. +func WithProcessPID() Option { + return WithDetectors(processPIDDetector{}) +} + +// WithProcessExecutableName adds an attribute with the name of the process +// executable to the configured Resource. +func WithProcessExecutableName() Option { + return WithDetectors(processExecutableNameDetector{}) +} + +// WithProcessExecutablePath adds an attribute with the full path to the process +// executable to the configured Resource. +func WithProcessExecutablePath() Option { + return WithDetectors(processExecutablePathDetector{}) +} + +// WithProcessCommandArgs adds an attribute with all the command arguments (including +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +func WithProcessCommandArgs() Option { + return WithDetectors(processCommandArgsDetector{}) +} + +// WithProcessOwner adds an attribute with the username of the user that owns the process +// to the configured Resource. +func WithProcessOwner() Option { + return WithDetectors(processOwnerDetector{}) +} + +// WithProcessRuntimeName adds an attribute with the name of the runtime of this +// process to the configured Resource. +func WithProcessRuntimeName() Option { + return WithDetectors(processRuntimeNameDetector{}) +} + +// WithProcessRuntimeVersion adds an attribute with the version of the runtime of +// this process to the configured Resource. 
+func WithProcessRuntimeVersion() Option { + return WithDetectors(processRuntimeVersionDetector{}) +} + +// WithProcessRuntimeDescription adds an attribute with an additional description +// about the runtime of the process to the configured Resource. +func WithProcessRuntimeDescription() Option { + return WithDetectors(processRuntimeDescriptionDetector{}) +} + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. +// Note: WithContainerID will not extract the correct container ID in an ECS environment. +// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs). +func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 00000000..5ecd859a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. +func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { + containerID, err := containerID() + if err != nil { + return nil, err + } + + if containerID == "" { + return Empty(), nil + } + return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil +} + +var ( + defaultOSStat = os.Stat + osStat = defaultOSStat + + defaultOSOpen = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + osOpen = defaultOSOpen +) + +// getContainerIDFromCGroup returns the id of the container from the cgroup file. +// If no container id found, an empty string will be returned. +func getContainerIDFromCGroup() (string, error) { + if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { + // File does not exist, skip + return "", nil + } + + file, err := osOpen(cgroupPath) + if err != nil { + return "", err + } + defer file.Close() + + return getContainerIDFromReader(file), nil +} + +// getContainerIDFromReader returns the id of the container from reader. +func getContainerIDFromReader(reader io.Reader) string { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + + if id := getContainerIDFromLine(line); id != "" { + return id + } + } + return "" +} + +// getContainerIDFromLine returns the id of the container from one string line. 
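+// For example (illustrative ID), the cgroup line +// "13:name=systemd:/docker/712d271d1ccd1a25caefb72b0fb6c52ca76bc3cd5d4ba1b99b3b56e61bfa0b33" +// yields "712d271d1ccd1a25caefb72b0fb6c52ca76bc3cd5d4ba1b99b3b56e61bfa0b33".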
+func getContainerIDFromLine(line string) string { + matches := cgroupContainerIDRe.FindStringSubmatch(line) + if len(matches) <= 1 { + return "" + } + return matches[1] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go new file mode 100644 index 00000000..64939a27 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package resource provides detecting and representing resources. +// +// The fundamental struct is a Resource which holds identifying information +// about the entities for which telemetry is exported. +// +// To automatically construct Resources from an environment, a Detector +// interface is defined. Implementations of this interface can be passed to +// the Detect function to generate a Resource from the merged information. +// +// To load a user defined Resource from the environment variable +// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret +// the value as a list of comma delimited key/value pairs +// (e.g. `<key1>=<value1>,<key2>=<value2>,...`). +// +// While this package provides a stable API, +// the attributes added by resource detectors may change. +package resource // import "go.opentelemetry.io/otel/sdk/resource" diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go new file mode 100644 index 00000000..813f0562 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "net/url" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +const ( + // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from. + resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials + + // svcNameKey is the environment variable name that Service Name information will be read from. + svcNameKey = "OTEL_SERVICE_NAME" +) + +// errMissingValue is returned when a resource value is missing. +var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource) + +// fromEnv is a Detector that collects resources from the environment. This +// Detector is included as a builtin. +type fromEnv struct{} + +// compile-time assertion that fromEnv implements the Detector interface. +var _ Detector = fromEnv{} + +// Detect collects resources from environment. +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceName(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined.
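+ // For example (illustrative values): with OTEL_SERVICE_NAME=checkout and + // OTEL_RESOURCE_ATTRIBUTES=service.name=legacy,region=us-east-1, the merged + // resource carries service.name=checkout and region=us-east-1.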
+ res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + var attrs []attribute.KeyValue + var invalid []string + for _, p := range pairs { + k, v, found := strings.Cut(p, "=") + if !found { + invalid = append(invalid, p) + continue + } + key := strings.TrimSpace(k) + val, err := url.PathUnescape(strings.TrimSpace(v)) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + val = v + otel.Handle(err) + } + attrs = append(attrs, attribute.String(key, val)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go new file mode 100644 index 00000000..2d0f6549 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go @@ -0,0 +1,109 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "strings" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type hostIDProvider func() (string, error) + +var defaultHostIDProvider hostIDProvider = platformHostIDReader.read + +var hostID = defaultHostIDProvider + +type hostIDReader interface { + read() (string, error) +} + +type fileReader func(string) (string, error) + +type commandExecutor func(string, ...string) (string, error) + +// hostIDReaderBSD implements hostIDReader. +type hostIDReaderBSD struct { + execCommand commandExecutor + readFile fileReader +} + +// read attempts to read the machine-id from /etc/hostid. If not found it will +// execute `kenv -q smbios.system.uuid`. If neither location yields an id an +// error will be returned. +func (r *hostIDReaderBSD) read() (string, error) { + if result, err := r.readFile("/etc/hostid"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/hostid or kenv") +} + +// hostIDReaderDarwin implements hostIDReader. +type hostIDReaderDarwin struct { + execCommand commandExecutor +} + +// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id +// from the IOPlatformUUID line. If the command fails or the uuid cannot be +// parsed an error will be returned. +func (r *hostIDReaderDarwin) read() (string, error) { + result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") + if err != nil { + return "", err + } + + lines := strings.Split(result, "\n") + for _, line := range lines { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.Split(line, " = ") + if len(parts) == 2 { + return strings.Trim(parts[1], "\""), nil + } + break + } + } + + return "", errors.New("could not parse IOPlatformUUID") +} + +type hostIDReaderLinux struct { + readFile fileReader +} + +// read attempts to read the machine-id from /etc/machine-id followed by +// /var/lib/dbus/machine-id. If neither location yields an ID an error will +// be returned. 
+func (r *hostIDReaderLinux) read() (string, error) { + if result, err := r.readFile("/etc/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil { + return strings.TrimSpace(result), nil + } + + return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id") +} + +type hostIDDetector struct{} + +// Detect returns a *Resource containing the platform specific host id. +func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { + hostID, err := hostID() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.HostID(hostID), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go new file mode 100644 index 00000000..cc8b8938 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go @@ -0,0 +1,12 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build dragonfly || freebsd || netbsd || openbsd || solaris +// +build dragonfly freebsd netbsd openbsd solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderBSD{ + execCommand: execCommand, + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go new file mode 100644 index 00000000..b09fde3b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderDarwin{ + execCommand: execCommand, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go new file mode 100644 index 00000000..d9e5d1a8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go @@ -0,0 +1,18 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os/exec" + +func execCommand(name string, arg ...string) (string, error) { + cmd := exec.Command(name, arg...) 
+ b, err := cmd.Output() + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go new file mode 100644 index 00000000..f84f1732 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux +// +build linux + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +var platformHostIDReader hostIDReader = &hostIDReaderLinux{ + readFile: readFile, +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go new file mode 100644 index 00000000..6354b356 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go @@ -0,0 +1,17 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import "os" + +func readFile(filename string) (string, error) { + b, err := os.ReadFile(filename) + if err != nil { + return "", err + } + + return string(b), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go new file mode 100644 index 00000000..df12c44c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// hostIDReaderUnsupported is a placeholder implementation for operating systems +// for which this project currently doesn't support host.id +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +type hostIDReaderUnsupported struct{} + +func (*hostIDReaderUnsupported) read() (string, error) { + return "", nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go new file mode 100644 index 00000000..3677c83d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build windows +// +build windows + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "golang.org/x/sys/windows/registry" +) + +// implements hostIDReader. +type hostIDReaderWindows struct{} + +// read reads MachineGuid from the Windows registry key: +// SOFTWARE\Microsoft\Cryptography. 
+func (*hostIDReaderWindows) read() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`, + registry.QUERY_VALUE|registry.WOW64_64KEY, + ) + if err != nil { + return "", err + } + defer k.Close() + + guid, _, err := k.GetStringValue("MachineGuid") + if err != nil { + return "", err + } + + return guid, nil +} + +var platformHostIDReader hostIDReader = &hostIDReaderWindows{} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 00000000..8a48ab4f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type ( + osTypeDetector struct{} + osDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. +func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescription(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. 
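+// For example, GOOS "linux" maps to the semconv Linux OS type, while an +// unmapped value such as "plan9" is lowercased and used as-is, yielding +// os.type=plan9.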
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+	// the elements in this map are the intersection between
+	// available GOOS values and defined semconv OS types
+	osTypeAttributeMap := map[string]attribute.KeyValue{
+		"aix":       semconv.OSTypeAIX,
+		"darwin":    semconv.OSTypeDarwin,
+		"dragonfly": semconv.OSTypeDragonflyBSD,
+		"freebsd":   semconv.OSTypeFreeBSD,
+		"linux":     semconv.OSTypeLinux,
+		"netbsd":    semconv.OSTypeNetBSD,
+		"openbsd":   semconv.OSTypeOpenBSD,
+		"solaris":   semconv.OSTypeSolaris,
+		"windows":   semconv.OSTypeWindows,
+		"zos":       semconv.OSTypeZOS,
+	}
+
+	var osTypeAttribute attribute.KeyValue
+
+	if attr, ok := osTypeAttributeMap[osType]; ok {
+		osTypeAttribute = attr
+	} else {
+		osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+	}
+
+	return osTypeAttribute
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 00000000..ce455dc5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,91 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"os"
+)
+
+type plist struct {
+	XMLName xml.Name `xml:"plist"`
+	Dict    dict     `xml:"dict"`
+}
+
+type dict struct {
+	Key    []string `xml:"key"`
+	String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` commandline program, but in a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+	file, err := getPlistFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values, err := parsePlistFile(file)
+	if err != nil {
+		return ""
+	}
+
+	return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{
+		"/System/Library/CoreServices/SystemVersion.plist",
+		"/System/Library/CoreServices/ServerVersion.plist",
+	})
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and returns
+// a map with the key-values for each pair of correlated <key> and <string> elements
+// contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+	var v plist
+
+	err := xml.NewDecoder(file).Decode(&v)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(v.Dict.Key) != len(v.Dict.String) {
+		return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+	}
+
+	properties := make(map[string]string, len(v.Dict.Key))
+	for i, key := range v.Dict.Key {
+		properties[key] = v.Dict.String[i]
+	}
+
+	return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+	productName := properties["ProductName"]
+	productVersion := properties["ProductVersion"]
+	productBuildVersion := properties["ProductBuildVersion"]
+
+	if productName == "" || productVersion == "" || productBuildVersion == "" {
+		return ""
+	}
+
+	return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 00000000..f537e5ca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+	file, err := getOSReleaseFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values := parseOSReleaseFile(file)
+
+	return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release file
+// and returns a map with the key-values contained in it. Empty lines or lines
+// starting with a '#' character are ignored, as well as lines missing the key=value
+// separator. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+	values := make(map[string]string)
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if skip(line) {
+			continue
+		}
+
+		key, value, ok := parse(line)
+		if ok {
+			values[key] = value
+		}
+	}
+
+	return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+	line = strings.TrimSpace(line)
+
+	return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+	k, v, found := strings.Cut(line, "=")
+
+	if !found || len(k) == 0 {
+		return "", "", false
+	}
+
+	key := strings.TrimSpace(k)
+	value := unescape(unquote(strings.TrimSpace(v)))
+
+	return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
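+//
+// Illustrative cases (a sketch):
+//
+//	unquote(`"Ubuntu"`) // Ubuntu
+//	unquote(`'22.04'`)  // 22.04
+//	unquote(`"a'`)      // unchanged: the quotes don't match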
+func unquote(s string) string {
+	if len(s) < 2 {
+		return s
+	}
+
+	if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+		return s[1 : len(s)-1]
+	}
+
+	return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+	return strings.NewReplacer(
+		`\$`, `$`,
+		`\"`, `"`,
+		`\'`, `'`,
+		`\\`, `\`,
+		"\\`", "`",
+	).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and uses `PRETTY_NAME` alone if either of those is not present. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
+func buildOSRelease(values map[string]string) string {
+	var osRelease string
+
+	name := values["NAME"]
+	version := values["VERSION"]
+
+	if version == "" {
+		version = values["VERSION_ID"]
+	}
+
+	if name != "" && version != "" {
+		osRelease = fmt.Sprintf("%s %s", name, version)
+	} else {
+		osRelease = values["PRETTY_NAME"]
+	}
+
+	return osRelease
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 00000000..a6ff26a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+	setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+	currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+	uname, err := uname()
+	if err != nil {
+		return "", err
+	}
+
+	osRelease := osRelease()
+	if osRelease != "" {
+		return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+	}
+
+	return uname, nil
+}
+
+// uname issues a uname(2) system call (or equivalent on systems that don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` commandline program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
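+//
+// Combined by platformOSDescription above, a hypothetical final description
+// looks like:
+//
+//	Ubuntu 22.04.4 LTS (Linux myhost 6.8.0-45-generic #45-Ubuntu SMP x86_64)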
+func uname() (string, error) { + var utsName unix.Utsname + + err := currentUnameProvider(&utsName) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s %s %s %s %s", + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), + ), nil +} + +// getFirstAvailableFile returns an *os.File of the first available +// file from a list of candidate file paths. +func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 00000000..a77742b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,15 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +func platformOSDescription() (string, error) { + return "", nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 00000000..a6a5a53c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
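+//
+// A hypothetical result (actual values depend on the installed build):
+//
+//	Windows 10 Pro 22H2 (2009) [Version 10.0.19045.4651]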
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 00000000..085fe68f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" +) + +type ( + pidProvider func() int + executablePathProvider func() (string, error) + commandArgsProvider func() []string + ownerProvider func() (*user.User, error) + runtimeNameProvider func() string + runtimeVersionProvider func() string + runtimeOSProvider func() string + runtimeArchProvider func() string +) + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = 
defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type ( + processPIDDetector struct{} + processExecutableNameDetector struct{} + processExecutablePathDetector struct{} + processCommandArgsDetector struct{} + processOwnerDetector struct{} + processRuntimeNameDetector struct{} + processRuntimeVersionDetector struct{} + processRuntimeDescriptionDetector struct{} +) + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. +func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. 
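+//
+// Callers typically enable this detector through the corresponding resource
+// option (a sketch; ctx and error handling assumed):
+//
+//	res, err := resource.New(ctx, resource.WithProcessRuntimeVersion())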
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescription(runtimeDescription), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 00000000..ad4b50df --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,294 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/internal/x" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. +// +// Resources should be passed and stored as pointers +// (`*resource.Resource`). The `nil` value is equivalent to an empty +// Resource. +type Resource struct { + attrs attribute.Set + schemaURL string +} + +var ( + defaultResource *Resource + defaultResourceOnce sync.Once +) + +// ErrSchemaURLConflict is an error returned when two Resources are merged +// together that contain different, non-empty, schema URLs. +var ErrSchemaURLConflict = errors.New("conflicting Schema URL") + +// New returns a [Resource] built using opts. +// +// This may return a partial Resource along with an error containing +// [ErrPartialResource] if options that provide a [Detector] are used and that +// error is returned from one or more of the Detectors. It may also return a +// merge-conflict Resource along with an error containing +// [ErrSchemaURLConflict] if merging Resources from the opts results in a +// schema URL conflict (see [Resource.Merge] for more information). It is up to +// the caller to determine if this returned Resource should be used or not +// based on these errors. +func New(ctx context.Context, opts ...Option) (*Resource, error) { + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + r := &Resource{schemaURL: cfg.schemaURL} + return r, detect(ctx, r, cfg.detectors) +} + +// NewWithAttributes creates a resource from attrs and associates the resource with a +// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs +// contains any invalid items those items will be dropped. The attrs are assumed to be +// in a schema identified by schemaURL. +func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource { + resource := NewSchemaless(attrs...) + resource.schemaURL = schemaURL + return resource +} + +// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys, +// the last value will be used. If attrs contains any invalid items those items will +// be dropped. The resource will not be associated with a schema URL. If the schema +// of the attrs is known use NewWithAttributes instead. 
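+//
+// A minimal usage sketch:
+//
+//	r := resource.NewSchemaless(attribute.String("service.name", "checkout"))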
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource { + if len(attrs) == 0 { + return &Resource{} + } + + // Ensure attributes comply with the specification: + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute + s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool { + return kv.Valid() + }) + + // If attrs only contains invalid entries do not allocate a new resource. + if s.Len() == 0 { + return &Resource{} + } + + return &Resource{attrs: s} //nolint +} + +// String implements the Stringer interface and provides a +// human-readable form of the resource. +// +// Avoid using this representation as the key in a map of resources, +// use Equivalent() as the key instead. +func (r *Resource) String() string { + if r == nil { + return "" + } + return r.attrs.Encoded(attribute.DefaultEncoder()) +} + +// MarshalLog is the marshaling function used by the logging system to represent this Resource. +func (r *Resource) MarshalLog() interface{} { + return struct { + Attributes attribute.Set + SchemaURL string + }{ + Attributes: r.attrs, + SchemaURL: r.schemaURL, + } +} + +// Attributes returns a copy of attributes from the resource in a sorted order. +// To avoid allocating a new slice, use an iterator. +func (r *Resource) Attributes() []attribute.KeyValue { + if r == nil { + r = Empty() + } + return r.attrs.ToSlice() +} + +// SchemaURL returns the schema URL associated with Resource r. +func (r *Resource) SchemaURL() string { + if r == nil { + return "" + } + return r.schemaURL +} + +// Iter returns an iterator of the Resource attributes. +// This is ideal to use if you do not want a copy of the attributes. +func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. +func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new [Resource] by merging a and b. +// +// If there are common keys between a and b, then the value from b will +// overwrite the value from a, even if b's value is empty. +// +// The SchemaURL of the resources will be merged according to the +// [OpenTelemetry specification rules]: +// +// - If a's schema URL is empty then the returned Resource's schema URL will +// be set to the schema URL of b, +// - Else if b's schema URL is empty then the returned Resource's schema URL +// will be set to the schema URL of a, +// - Else if the schema URLs of a and b are the same then that will be the +// schema URL of the returned Resource, +// - Else this is a merging error. If the resources have different, +// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will +// be returned with the merged Resource. The merged Resource will have an +// empty schema URL. It may be the case that some unintended attributes +// have been overwritten or old semantic conventions persisted in the +// returned Resource. It is up to the caller to determine if this returned +// Resource should be used or not. 
+// +// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Attribute()) + } + + switch { + case a.schemaURL == "": + return NewWithAttributes(b.schemaURL, combine...), nil + case b.schemaURL == "": + return NewWithAttributes(a.schemaURL, combine...), nil + case a.schemaURL == b.schemaURL: + return NewWithAttributes(a.schemaURL, combine...), nil + } + // Return the merged resource with an appropriate error. It is up to + // the user to decide if the returned resource can be used or not. + return NewSchemaless(combine...), fmt.Errorf( + "%w: %s and %s", + ErrSchemaURLConflict, + a.schemaURL, + b.schemaURL, + ) +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &Resource{} +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultDetectors := []Detector{ + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + } + if x.Resource.Enabled() { + defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...) + } + defaultResource, err = Detect( + context.Background(), + defaultDetectors..., + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &Resource{} + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. +func (r *Resource) Len() int { + if r == nil { + return 0 + } + return r.attrs.Len() +} + +// Encoded returns an encoded representation of the resource. 
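+//
+// A usage sketch (DefaultEncoder yields a sorted, comma-separated k=v list):
+//
+//	s := res.Encoded(attribute.DefaultEncoder())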
+func (r *Resource) Encoded(enc attribute.Encoder) string { + if r == nil { + return "" + } + return r.attrs.Encoded(enc) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md new file mode 100644 index 00000000..f2936e14 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md @@ -0,0 +1,3 @@ +# SDK Trace + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go new file mode 100644 index 00000000..4ce757df --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -0,0 +1,413 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/internal/env" + "go.opentelemetry.io/otel/trace" +) + +// Defaults for BatchSpanProcessorOptions. +const ( + DefaultMaxQueueSize = 2048 + DefaultScheduleDelay = 5000 + DefaultExportTimeout = 30000 + DefaultMaxExportBatchSize = 512 +) + +// BatchSpanProcessorOption configures a BatchSpanProcessor. +type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) + +// BatchSpanProcessorOptions is configuration settings for a +// BatchSpanProcessor. +type BatchSpanProcessorOptions struct { + // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the + // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. + // The default value of MaxQueueSize is 2048. + MaxQueueSize int + + // BatchTimeout is the maximum duration for constructing a batch. Processor + // forcefully sends available spans when timeout is reached. + // The default value of BatchTimeout is 5000 msec. + BatchTimeout time.Duration + + // ExportTimeout specifies the maximum duration for exporting spans. If the timeout + // is reached, the export will be cancelled. + // The default value of ExportTimeout is 30000 msec. + ExportTimeout time.Duration + + // MaxExportBatchSize is the maximum number of spans to process in a single batch. + // If there are more than one batch worth of spans then it processes multiple batches + // of spans one batch after the other without any delay. + // The default value of MaxExportBatchSize is 512. + MaxExportBatchSize int + + // BlockOnQueueFull blocks onEnd() and onStart() method if the queue is full + // AND if BlockOnQueueFull is set to true. + // Blocking option should be used carefully as it can severely affect the performance of an + // application. + BlockOnQueueFull bool +} + +// batchSpanProcessor is a SpanProcessor that batches asynchronously-received +// spans and sends them to a trace.Exporter when complete. +type batchSpanProcessor struct { + e SpanExporter + o BatchSpanProcessorOptions + + queue chan ReadOnlySpan + dropped uint32 + + batch []ReadOnlySpan + batchMutex sync.Mutex + timer *time.Timer + stopWait sync.WaitGroup + stopOnce sync.Once + stopCh chan struct{} + stopped atomic.Bool +} + +var _ SpanProcessor = (*batchSpanProcessor)(nil) + +// NewBatchSpanProcessor creates a new SpanProcessor that will send completed +// span batches to the exporter with the supplied options. 
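+//
+// A usage sketch with options defined in this file (exp is an assumed
+// SpanExporter):
+//
+//	bsp := trace.NewBatchSpanProcessor(exp,
+//		trace.WithMaxQueueSize(4096),
+//		trace.WithBatchTimeout(2*time.Second),
+//	)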
+// +// If the exporter is nil, the span processor will perform no action. +func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor { + maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize) + maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) + + if maxExportBatchSize > maxQueueSize { + if DefaultMaxExportBatchSize > maxQueueSize { + maxExportBatchSize = maxQueueSize + } else { + maxExportBatchSize = DefaultMaxExportBatchSize + } + } + + o := BatchSpanProcessorOptions{ + BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond, + ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond, + MaxQueueSize: maxQueueSize, + MaxExportBatchSize: maxExportBatchSize, + } + for _, opt := range options { + opt(&o) + } + bsp := &batchSpanProcessor{ + e: exporter, + o: o, + batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize), + timer: time.NewTimer(o.BatchTimeout), + queue: make(chan ReadOnlySpan, o.MaxQueueSize), + stopCh: make(chan struct{}), + } + + bsp.stopWait.Add(1) + go func() { + defer bsp.stopWait.Done() + bsp.processQueue() + bsp.drainQueue() + }() + + return bsp +} + +// OnStart method does nothing. +func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} + +// OnEnd method enqueues a ReadOnlySpan for later processing. +func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { + // Do not enqueue spans after Shutdown. + if bsp.stopped.Load() { + return + } + + // Do not enqueue spans if we are just going to drop them. + if bsp.e == nil { + return + } + bsp.enqueue(s) +} + +// Shutdown flushes the queue and waits until all spans are processed. +// It only executes once. Subsequent call does nothing. +func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error { + var err error + bsp.stopOnce.Do(func() { + bsp.stopped.Store(true) + wait := make(chan struct{}) + go func() { + close(bsp.stopCh) + bsp.stopWait.Wait() + if bsp.e != nil { + if err := bsp.e.Shutdown(ctx); err != nil { + otel.Handle(err) + } + } + close(wait) + }() + // Wait until the wait group is done or the context is cancelled + select { + case <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + }) + return err +} + +type forceFlushSpan struct { + ReadOnlySpan + flushed chan struct{} +} + +func (f forceFlushSpan) SpanContext() trace.SpanContext { + return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) +} + +// ForceFlush exports all ended spans that have not yet been exported. +func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { + // Interrupt if context is already canceled. + if err := ctx.Err(); err != nil { + return err + } + + // Do nothing after Shutdown. + if bsp.stopped.Load() { + return nil + } + + var err error + if bsp.e != nil { + flushCh := make(chan struct{}) + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { + select { + case <-bsp.stopCh: + // The batchSpanProcessor is Shutdown. 
+ return nil + case <-flushCh: + // Processed any items in queue prior to ForceFlush being called + case <-ctx.Done(): + return ctx.Err() + } + } + + wait := make(chan error) + go func() { + wait <- bsp.exportSpans(ctx) + close(wait) + }() + // Wait until the export is finished or the context is cancelled/timed out + select { + case err = <-wait: + case <-ctx.Done(): + err = ctx.Err() + } + } + return err +} + +// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the +// maximum queue size allowed for a BatchSpanProcessor. +func WithMaxQueueSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxQueueSize = size + } +} + +// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures +// the maximum export batch size allowed for a BatchSpanProcessor. +func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.MaxExportBatchSize = size + } +} + +// WithBatchTimeout returns a BatchSpanProcessorOption that configures the +// maximum delay allowed for a BatchSpanProcessor before it will export any +// held span (whether the queue is full or not). +func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BatchTimeout = delay + } +} + +// WithExportTimeout returns a BatchSpanProcessorOption that configures the +// amount of time a BatchSpanProcessor waits for an exporter to export before +// abandoning the export. +func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.ExportTimeout = timeout + } +} + +// WithBlocking returns a BatchSpanProcessorOption that configures a +// BatchSpanProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. +func WithBlocking() BatchSpanProcessorOption { + return func(o *BatchSpanProcessorOptions) { + o.BlockOnQueueFull = true + } +} + +// exportSpans is a subroutine of processing and draining the queue. +func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { + bsp.timer.Reset(bsp.o.BatchTimeout) + + bsp.batchMutex.Lock() + defer bsp.batchMutex.Unlock() + + if bsp.o.ExportTimeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) + defer cancel() + } + + if l := len(bsp.batch); l > 0 { + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) + err := bsp.e.ExportSpans(ctx, bsp.batch) + + // A new batch is always created after exporting, even if the batch failed to be exported. + // + // It is up to the exporter to implement any type of retry logic if a batch is failing + // to be exported, since it is specific to the protocol and backend being sent to. + bsp.batch = bsp.batch[:0] + + if err != nil { + return err + } + } + return nil +} + +// processQueue removes spans from the `queue` channel until processor +// is shut down. It calls the exporter in batches of up to MaxExportBatchSize +// waiting up to BatchTimeout to form a batch. 
+func (bsp *batchSpanProcessor) processQueue() {
+	defer bsp.timer.Stop()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case <-bsp.stopCh:
+			return
+		case <-bsp.timer.C:
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+		case sd := <-bsp.queue:
+			if ffs, ok := sd.(forceFlushSpan); ok {
+				close(ffs.flushed)
+				continue
+			}
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+			if shouldExport {
+				if !bsp.timer.Stop() {
+					// Handle both GODEBUG=asynctimerchan=[0|1] properly.
+					select {
+					case <-bsp.timer.C:
+					default:
+					}
+				}
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		}
+	}
+}
+
+// drainQueue awaits any caller that had added to bsp.stopWait
+// to finish the enqueue, then exports the final batch.
+func (bsp *batchSpanProcessor) drainQueue() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case sd := <-bsp.queue:
+			if _, ok := sd.(forceFlushSpan); ok {
+				// Ignore flush requests as they are not valid spans.
+				continue
+			}
+
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+
+			if shouldExport {
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		default:
+			// There are no more enqueued spans. Make final export.
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+			return
+		}
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+	ctx := context.TODO()
+	if bsp.o.BlockOnQueueFull {
+		bsp.enqueueBlockOnQueueFull(ctx, sd)
+	} else {
+		bsp.enqueueDrop(ctx, sd)
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	case <-ctx.Done():
+		return false
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	default:
+		atomic.AddUint32(&bsp.dropped, 1)
+	}
+	return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type         string
+		SpanExporter SpanExporter
+		Config       BatchSpanProcessorOptions
+	}{
+		Type:         "BatchSpanProcessor",
+		SpanExporter: bsp.e,
+		Config:       bsp.o,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
new file mode 100644
index 00000000..1f60524e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package trace contains support for OpenTelemetry distributed tracing.
+
+The following assumes a basic familiarity with OpenTelemetry concepts.
+See https://opentelemetry.io.
+*/ +package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go new file mode 100644 index 00000000..60a7ed13 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Event is a thing that happened during a Span's lifetime. +type Event struct { + // Name is the name of this event + Name string + + // Attributes describe the aspects of the event. + Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int + + // Time at which this event was recorded. + Time time.Time +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go new file mode 100644 index 00000000..8c308dd6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "slices" + "sync" + + "go.opentelemetry.io/otel/internal/global" +) + +// evictedQueue is a FIFO queue with a configurable capacity. +type evictedQueue[T any] struct { + queue []T + capacity int + droppedCount int + logDroppedMsg string + logDroppedOnce sync.Once +} + +func newEvictedQueueEvent(capacity int) evictedQueue[Event] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Event]{ + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Event", + } +} + +func newEvictedQueueLink(capacity int) evictedQueue[Link] { + // Do not pre-allocate queue, do this lazily. + return evictedQueue[Link]{ + capacity: capacity, + logDroppedMsg: "limit reached: dropping trace trace.Link", + } +} + +// add adds value to the evictedQueue eq. If eq is at capacity, the oldest +// queued value will be discarded and the drop count incremented. +func (eq *evictedQueue[T]) add(value T) { + if eq.capacity == 0 { + eq.droppedCount++ + eq.logDropped() + return + } + + if eq.capacity > 0 && len(eq.queue) == eq.capacity { + // Drop first-in while avoiding allocating more capacity to eq.queue. + copy(eq.queue[:eq.capacity-1], eq.queue[1:]) + eq.queue = eq.queue[:eq.capacity-1] + eq.droppedCount++ + eq.logDropped() + } + eq.queue = append(eq.queue, value) +} + +func (eq *evictedQueue[T]) logDropped() { + eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) }) +} + +// copy returns a copy of the evictedQueue. +func (eq *evictedQueue[T]) copy() []T { + return slices.Clone(eq.queue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go new file mode 100644 index 00000000..925bcf99 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "math/rand" + "sync" + + "go.opentelemetry.io/otel/trace" +) + +// IDGenerator allows custom generators for TraceID and SpanID. 
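+//
+// A custom generator is installed via the provider option defined in this
+// package (a sketch; myGen is an assumed IDGenerator implementation):
+//
+//	tp := trace.NewTracerProvider(trace.WithIDGenerator(myGen))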
+type IDGenerator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewIDs returns a new trace and span ID.
+	NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// NewSpanID returns an ID for a new span in the trace with traceID.
+	NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct {
+	sync.Mutex
+	randSource *rand.Rand
+}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+	gen.Lock()
+	defer gen.Unlock()
+	sid := trace.SpanID{}
+	for {
+		_, _ = gen.randSource.Read(sid[:])
+		if sid.IsValid() {
+			break
+		}
+	}
+	return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+	gen.Lock()
+	defer gen.Unlock()
+	tid := trace.TraceID{}
+	sid := trace.SpanID{}
+	for {
+		_, _ = gen.randSource.Read(tid[:])
+		if tid.IsValid() {
+			break
+		}
+	}
+	for {
+		_, _ = gen.randSource.Read(sid[:])
+		if sid.IsValid() {
+			break
+		}
+	}
+	return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+	gen := &randomIDGenerator{}
+	var rngSeed int64
+	_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+	gen.randSource = rand.New(rand.NewSource(rngSeed))
+	return gen
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 00000000..c03bdc90
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext trace.SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 00000000..14c2e5be
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,493 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+const (
+	defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig holds the configuration for a TracerProvider.
+type tracerProviderConfig struct {
+	// processors contains the collection of SpanProcessors that form the
+	// processing pipeline for spans in the trace signal.
+	// SpanProcessors registered with a TracerProvider are called at the start
+	// and end of a Span's lifecycle, in the order they are registered.
+	processors []SpanProcessor
+
+	// sampler is the default sampler used when creating new spans.
+	sampler Sampler
+
+	// idGenerator is used to generate all Span and Trace IDs when needed.
+	idGenerator IDGenerator
+
+	// spanLimits defines the attribute, event, and link limits for spans.
+	spanLimits SpanLimits
+
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+	return struct {
+		SpanProcessors  []SpanProcessor
+		SamplerType     string
+		IDGeneratorType string
+		SpanLimits      SpanLimits
+		Resource        *resource.Resource
+	}{
+		SpanProcessors:  cfg.processors,
+		SamplerType:     fmt.Sprintf("%T", cfg.sampler),
+		IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+		SpanLimits:      cfg.spanLimits,
+		Resource:        cfg.resource,
+	}
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+	embedded.TracerProvider
+
+	mu             sync.Mutex
+	namedTracer    map[instrumentation.Scope]*tracer
+	spanProcessors atomic.Pointer[spanProcessorStates]
+
+	isShutdown atomic.Bool
+
+	// These fields are not protected by the lock mu. They are assumed to be
+	// immutable after creation of the TracerProvider.
+	sampler     Sampler
+	idGenerator IDGenerator
+	spanLimits  SpanLimits
+	resource    *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+//   - a ParentBased(AlwaysSample) Sampler
+//   - a random number IDGenerator
+//   - the resource.Default() Resource
+//   - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
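+//
+// A typical construction and registration sketch (exp is an assumed
+// SpanExporter; ctx and error handling are elided):
+//
+//	tp := trace.NewTracerProvider(trace.WithBatcher(exp))
+//	otel.SetTracerProvider(tp)
+//	defer func() { _ = tp.Shutdown(ctx) }()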
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { + o := tracerProviderConfig{ + spanLimits: NewSpanLimits(), + } + o = applyTracerProviderEnvConfigs(o) + + for _, opt := range opts { + o = opt.apply(o) + } + + o = ensureValidTracerProviderConfig(o) + + tp := &TracerProvider{ + namedTracer: make(map[instrumentation.Scope]*tracer), + sampler: o.sampler, + idGenerator: o.idGenerator, + spanLimits: o.spanLimits, + resource: o.resource, + } + global.Info("TracerProvider created", "config", o) + + spss := make(spanProcessorStates, 0, len(o.processors)) + for _, sp := range o.processors { + spss = append(spss, newSpanProcessorState(sp)) + } + tp.spanProcessors.Store(&spss) + + return tp +} + +// Tracer returns a Tracer with the given name and options. If a Tracer for +// the given name and options does not exist it is created, otherwise the +// existing Tracer is returned. +// +// If name is empty, DefaultTracerName is used instead. +// +// This method is safe to be called concurrently. +func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown(). + if p.isShutdown.Load() { + return noop.NewTracerProvider().Tracer(name, opts...) + } + c := trace.NewTracerConfig(opts...) + if name == "" { + name = defaultTracerName + } + is := instrumentation.Scope{ + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } + + t, ok := func() (trace.Tracer, bool) { + p.mu.Lock() + defer p.mu.Unlock() + // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran + // after the first check above but before we acquired the mutex. + if p.isShutdown.Load() { + return noop.NewTracerProvider().Tracer(name, opts...), true + } + t, ok := p.namedTracer[is] + if !ok { + t = &tracer{ + provider: p, + instrumentationScope: is, + } + p.namedTracer[is] = t + } + return t, ok + }() + if !ok { + // This code is outside the mutex to not hold the lock while calling third party logging code: + // - That code may do slow things like I/O, which would prolong the duration the lock is held, + // slowing down all tracing consumers. + // - Logging code may be instrumented with tracing and deadlock because it could try + // acquiring the same non-reentrant mutex. + global.Info("Tracer created", "name", name, "version", is.Version, "schemaURL", is.SchemaURL) + } + return t +} + +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { + // This check prevents calls during a shutdown. + if p.isShutdown.Load() { + return + } + p.mu.Lock() + defer p.mu.Unlock() + // This check prevents calls after a shutdown. + if p.isShutdown.Load() { + return + } + + current := p.getSpanProcessors() + newSPS := make(spanProcessorStates, 0, len(current)+1) + newSPS = append(newSPS, current...) + newSPS = append(newSPS, newSpanProcessorState(sp)) + p.spanProcessors.Store(&newSPS) +} + +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { + // This check prevents calls during a shutdown. + if p.isShutdown.Load() { + return + } + p.mu.Lock() + defer p.mu.Unlock() + // This check prevents calls after a shutdown. 
+	if p.isShutdown.Load() {
+		return
+	}
+	old := p.getSpanProcessors()
+	if len(old) == 0 {
+		return
+	}
+	spss := make(spanProcessorStates, len(old))
+	copy(spss, old)
+
+	// stop the span processor if it is started and remove it from the list
+	var stopOnce *spanProcessorState
+	var idx int
+	for i, sps := range spss {
+		if sps.sp == sp {
+			stopOnce = sps
+			idx = i
+		}
+	}
+	if stopOnce != nil {
+		stopOnce.state.Do(func() {
+			if err := sp.Shutdown(context.Background()); err != nil {
+				otel.Handle(err)
+			}
+		})
+	}
+	if len(spss) > 1 {
+		copy(spss[idx:], spss[idx+1:])
+	}
+	spss[len(spss)-1] = nil
+	spss = spss[:len(spss)-1]
+
+	p.spanProcessors.Store(&spss)
+}
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+	spss := p.getSpanProcessors()
+	if len(spss) == 0 {
+		return nil
+	}
+
+	for _, sps := range spss {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		if err := sps.sp.ForceFlush(ctx); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
+// After Shutdown is called, all methods are no-ops.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+	// This check prevents deadlocks in case of recursive shutdown.
+	if p.isShutdown.Load() {
+		return nil
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	// This check prevents calls after a shutdown has already been done concurrently.
+	if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+		return nil
+	}
+
+	var retErr error
+	for _, sps := range p.getSpanProcessors() {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+		}
+
+		var err error
+		sps.state.Do(func() {
+			err = sps.sp.Shutdown(ctx)
+		})
+		if err != nil {
+			if retErr == nil {
+				retErr = err
+			} else {
+				// Poor man's list of errors
+				retErr = fmt.Errorf("%w; %w", retErr, err)
+			}
+		}
+	}
+	p.spanProcessors.Store(&spanProcessorStates{})
+	return retErr
+}
+
+func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
+	return *(p.spanProcessors.Load())
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+	apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+	return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that wraps the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computational resource usage overhead. The WithBatcher option
+// is recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+	return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+	return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.processors = append(cfg.processors, sp) + return cfg + }) +} + +// WithResource returns a TracerProviderOption that will configure the +// Resource r as a TracerProvider's Resource. The configured Resource is +// referenced by all the Tracers the TracerProvider creates. It represents the +// entity producing telemetry. +// +// If this option is not used, the TracerProvider will use the +// resource.Default() Resource by default. +func WithResource(r *resource.Resource) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + var err error + cfg.resource, err = resource.Merge(resource.Environment(), r) + if err != nil { + otel.Handle(err) + } + return cfg + }) +} + +// WithIDGenerator returns a TracerProviderOption that will configure the +// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator +// is used by the Tracers the TracerProvider creates to generate new Span and +// Trace IDs. +// +// If this option is not used, the TracerProvider will use a random number +// IDGenerator by default. +func WithIDGenerator(g IDGenerator) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if g != nil { + cfg.idGenerator = g + } + return cfg + }) +} + +// WithSampler returns a TracerProviderOption that will configure the Sampler +// s as a TracerProvider's Sampler. The configured Sampler is used by the +// Tracers the TracerProvider creates to make their sampling decisions for the +// Spans they create. +// +// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER +// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used +// and the sampler is not configured through environment variables or the environment +// contains invalid/unsupported configuration, the TracerProvider will use a +// ParentBased(AlwaysSample) Sampler by default. +func WithSampler(s Sampler) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + if s != nil { + cfg.sampler = s + } + return cfg + }) +} + +// WithSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span +// created by a Tracer from the TracerProvider. +// +// If any field of sl is zero or negative it will be replaced with the default +// value for that field. +// +// If this or WithRawSpanLimits are not provided, the TracerProvider will use +// the limits defined by environment variables, or the defaults if unset. +// Refer to the NewSpanLimits documentation for information about this +// relationship. +// +// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited +// and zero limits. This option will be kept until the next major version +// incremented release. 
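+//
+// For example (an illustrative migration sketch; the limit value is
+// arbitrary), equivalent limits can be expressed with WithRawSpanLimits,
+// starting from NewSpanLimits:
+//
+//	limits := NewSpanLimits()
+//	limits.AttributeCountLimit = 256
+//	tp := NewTracerProvider(WithRawSpanLimits(limits))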
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+	if sl.AttributeValueLengthLimit <= 0 {
+		sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+	}
+	if sl.AttributeCountLimit <= 0 {
+		sl.AttributeCountLimit = DefaultAttributeCountLimit
+	}
+	if sl.EventCountLimit <= 0 {
+		sl.EventCountLimit = DefaultEventCountLimit
+	}
+	if sl.AttributePerEventCountLimit <= 0 {
+		sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+	}
+	if sl.LinkCountLimit <= 0 {
+		sl.LinkCountLimit = DefaultLinkCountLimit
+	}
+	if sl.AttributePerLinkCountLimit <= 0 {
+		sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+	}
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = sl
+		return cfg
+	})
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits and setting to a
+// negative value will mean that resource is unlimited. Consequently, this
+// means that the zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = limits
+		return cfg
+	})
+}
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+	for _, opt := range tracerProviderOptionsFromEnv() {
+		cfg = opt.apply(cfg)
+	}
+
+	return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+	var opts []TracerProviderOption
+
+	sampler, err := samplerFromEnv()
+	if err != nil {
+		otel.Handle(err)
+	}
+
+	if sampler != nil {
+		opts = append(opts, WithSampler(sampler))
+	}
+
+	return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
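+// The defaults applied are visible in the body below: a nil sampler becomes
+// ParentBased(AlwaysSample()), a nil idGenerator becomes the default random
+// IDGenerator, and a nil resource becomes resource.Default().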
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { + if cfg.sampler == nil { + cfg.sampler = ParentBased(AlwaysSample()) + } + if cfg.idGenerator == nil { + cfg.idGenerator = defaultIDGenerator() + } + if cfg.resource == nil { + cfg.resource = resource.Default() + } + return cfg +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go new file mode 100644 index 00000000..d2d1f724 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" +) + +const ( + tracesSamplerKey = "OTEL_TRACES_SAMPLER" + tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" + + samplerAlwaysOn = "always_on" + samplerAlwaysOff = "always_off" + samplerTraceIDRatio = "traceidratio" + samplerParentBasedAlwaysOn = "parentbased_always_on" + samplerParsedBasedAlwaysOff = "parentbased_always_off" + samplerParentBasedTraceIDRatio = "parentbased_traceidratio" +) + +type errUnsupportedSampler string + +func (e errUnsupportedSampler) Error() string { + return fmt.Sprintf("unsupported sampler: %s", string(e)) +} + +var ( + errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") + errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") +) + +type samplerArgParseError struct { + parseErr error +} + +func (e samplerArgParseError) Error() string { + return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) +} + +func (e samplerArgParseError) Unwrap() error { + return e.parseErr +} + +func samplerFromEnv() (Sampler, error) { + sampler, ok := os.LookupEnv(tracesSamplerKey) + if !ok { + return nil, nil + } + + sampler = strings.ToLower(strings.TrimSpace(sampler)) + samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) + samplerArg = strings.TrimSpace(samplerArg) + + switch sampler { + case samplerAlwaysOn: + return AlwaysSample(), nil + case samplerAlwaysOff: + return NeverSample(), nil + case samplerTraceIDRatio: + if !hasSamplerArg { + return TraceIDRatioBased(1.0), nil + } + return parseTraceIDRatio(samplerArg) + case samplerParentBasedAlwaysOn: + return ParentBased(AlwaysSample()), nil + case samplerParsedBasedAlwaysOff: + return ParentBased(NeverSample()), nil + case samplerParentBasedTraceIDRatio: + if !hasSamplerArg { + return ParentBased(TraceIDRatioBased(1.0)), nil + } + ratio, err := parseTraceIDRatio(samplerArg) + return ParentBased(ratio), err + default: + return nil, errUnsupportedSampler(sampler) + } +} + +func parseTraceIDRatio(arg string) (Sampler, error) { + v, err := strconv.ParseFloat(arg, 64) + if err != nil { + return TraceIDRatioBased(1.0), samplerArgParseError{err} + } + if v < 0.0 { + return TraceIDRatioBased(1.0), errNegativeTraceIDRatio + } + if v > 1.0 { + return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio + } + + return TraceIDRatioBased(v), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go new file mode 100644 index 00000000..ebb6df6c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -0,0 +1,282 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + 
"encoding/binary" + "fmt" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Sampler decides whether a trace should be sampled and exported. +type Sampler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ShouldSample returns a SamplingResult based on a decision made from the + // passed parameters. + ShouldSample(parameters SamplingParameters) SamplingResult + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Description returns information describing the Sampler. + Description() string + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext context.Context + TraceID trace.TraceID + Name string + Kind trace.SpanKind + Attributes []attribute.KeyValue + Links []trace.Link +} + +// SamplingDecision indicates whether a span is dropped, recorded and/or sampled. +type SamplingDecision uint8 + +// Valid sampling decisions. +const ( + // Drop will not record the span and all attributes/events will be dropped. + Drop SamplingDecision = iota + + // Record indicates the span's `IsRecording() == true`, but `Sampled` flag + // *must not* be set. + RecordOnly + + // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag + // *must* be set. + RecordAndSample +) + +// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate. +type SamplingResult struct { + Decision SamplingDecision + Attributes []attribute.KeyValue + Tracestate trace.TraceState +} + +type traceIDRatioSampler struct { + traceIDUpperBound uint64 + description string +} + +func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { + psc := trace.SpanContextFromContext(p.ParentContext) + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 + if x < ts.traceIDUpperBound { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: psc.TraceState(), + } + } + return SamplingResult{ + Decision: Drop, + Tracestate: psc.TraceState(), + } +} + +func (ts traceIDRatioSampler) Description() string { + return ts.description +} + +// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will +// always sample. Fractions < 0 are treated as zero. To respect the +// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used +// as a delegate of a `Parent` sampler. +// +//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` +func TraceIDRatioBased(fraction float64) Sampler { + if fraction >= 1 { + return AlwaysSample() + } + + if fraction <= 0 { + fraction = 0 + } + + return &traceIDRatioSampler{ + traceIDUpperBound: uint64(fraction * (1 << 63)), + description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction), + } +} + +type alwaysOnSampler struct{} + +func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: RecordAndSample, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOnSampler) Description() string { + return "AlwaysOnSampler" +} + +// AlwaysSample returns a Sampler that samples every trace. 
+// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return alwaysOnSampler{} +} + +type alwaysOffSampler struct{} + +func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult { + return SamplingResult{ + Decision: Drop, + Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(), + } +} + +func (as alwaysOffSampler) Description() string { + return "AlwaysOffSampler" +} + +// NeverSample returns a Sampler that samples no traces. +func NeverSample() Sampler { + return alwaysOffSampler{} +} + +// ParentBased returns a sampler decorator which behaves differently, +// based on the parent of the span. If the span has no parent, +// the decorated sampler is used to make sampling decision. If the span has +// a parent, depending on whether the parent is remote and whether it +// is sampled, one of the following samplers will apply: +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) +func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { + return parentBased{ + root: root, + config: configureSamplersForParentBased(samplers), + } +} + +type parentBased struct { + root Sampler + config samplerConfig +} + +func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { + c := samplerConfig{ + remoteParentSampled: AlwaysSample(), + remoteParentNotSampled: NeverSample(), + localParentSampled: AlwaysSample(), + localParentNotSampled: NeverSample(), + } + + for _, so := range samplers { + c = so.apply(c) + } + + return c +} + +// samplerConfig is a group of options for parentBased sampler. +type samplerConfig struct { + remoteParentSampled, remoteParentNotSampled Sampler + localParentSampled, localParentNotSampled Sampler +} + +// ParentBasedSamplerOption configures the sampler for a particular sampling case. +type ParentBasedSamplerOption interface { + apply(samplerConfig) samplerConfig +} + +// WithRemoteParentSampled sets the sampler for the case of sampled remote parent. +func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentSampledOption{s} +} + +type remoteParentSampledOption struct { + s Sampler +} + +func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentSampled = o.s + return config +} + +// WithRemoteParentNotSampled sets the sampler for the case of remote parent +// which is not sampled. +func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentNotSampledOption{s} +} + +type remoteParentNotSampledOption struct { + s Sampler +} + +func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentNotSampled = o.s + return config +} + +// WithLocalParentSampled sets the sampler for the case of sampled local parent. +func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { + return localParentSampledOption{s} +} + +type localParentSampledOption struct { + s Sampler +} + +func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentSampled = o.s + return config +} + +// WithLocalParentNotSampled sets the sampler for the case of local parent +// which is not sampled. 
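+//
+// For example (illustrative only), a ParentBased sampler that samples half
+// of all root traces but still samples spans whose local parent was not
+// sampled:
+//
+//	s := ParentBased(
+//		TraceIDRatioBased(0.5),
+//		WithLocalParentNotSampled(AlwaysSample()),
+//	)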
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentNotSampled = o.s
+	return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	if psc.IsValid() {
+		if psc.IsRemote() {
+			if psc.IsSampled() {
+				return pb.config.remoteParentSampled.ShouldSample(p)
+			}
+			return pb.config.remoteParentNotSampled.ShouldSample(p)
+		}
+
+		if psc.IsSampled() {
+			return pb.config.localParentSampled.ShouldSample(p)
+		}
+		return pb.config.localParentNotSampled.ShouldSample(p)
+	}
+	return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+	return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+		"remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+		pb.root.Description(),
+		pb.config.remoteParentSampled.Description(),
+		pb.config.remoteParentNotSampled.Description(),
+		pb.config.localParentSampled.Description(),
+		pb.config.localParentNotSampled.Description(),
+	)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 00000000..554111bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,121 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed spans to a SpanExporter immediately.
+type simpleSpanProcessor struct {
+	exporterMu sync.Mutex
+	exporter   SpanExporter
+	stopOnce   sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or showing
+// examples of other features, but it will be slow and have high computational
+// overhead. The BatchSpanProcessor is recommended for production
+// use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+	ssp := &simpleSpanProcessor{
+		exporter: exporter,
+	}
+	global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
+
+	return ssp
+}
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+	ssp.exporterMu.Lock()
+	defer ssp.exporterMu.Unlock()
+
+	if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+		if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+			otel.Handle(err)
+		}
+	}
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
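+//
+// Callers typically bound the shutdown with a deadline, for example
+// (illustrative only; sp is any SpanProcessor):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	_ = sp.Shutdown(ctx)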
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	ssp.stopOnce.Do(func() {
+		stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+			done := make(chan error)
+			return done, func() { done <- exp.Shutdown(ctx) }
+		}
+
+		// The exporter field of the simpleSpanProcessor needs to be zeroed to
+		// signal it is shut down, meaning all subsequent calls to OnEnd will
+		// be gracefully ignored. This needs to be done synchronously to avoid
+		// any race condition.
+		//
+		// A closure is used to keep reference to the exporter and then the
+		// field is zeroed. This ensures the simpleSpanProcessor is shut down
+		// before the exporter. This order is important as it avoids a potential
+		// deadlock. If the exporter shut down operation generates a span, that
+		// span would need to be exported. Meaning, OnEnd would be called and
+		// try acquiring the lock that is held here.
+		ssp.exporterMu.Lock()
+		done, shutdown := stopFunc(ssp.exporter)
+		ssp.exporter = nil
+		ssp.exporterMu.Unlock()
+
+		go shutdown()
+
+		// Wait for the exporter to shut down or the deadline to expire.
+		select {
+		case err = <-done:
+		case <-ctx.Done():
+			// It is possible for the exporter to have immediately shut down and
+			// the context to be done simultaneously. In that case this outer
+			// select statement will randomly choose a case. This will result in
+			// a different returned error for similar scenarios. Instead, double
+			// check if the exporter shut down at the same time and return that
+			// error if so. This will ensure consistency as well as ensure
+			// the caller knows the exporter shut down successfully (they can
+			// already determine if the deadline is expired given they passed
+			// the context).
+			select {
+			case err = <-done:
+			default:
+				err = ctx.Err()
+			}
+		}
+	})
+	return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+	return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter SpanExporter
+	}{
+		Type:     "SimpleSpanProcessor",
+		Exporter: ssp.exporter,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 00000000..d511d0f2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+	name                  string
+	spanContext           trace.SpanContext
+	parent                trace.SpanContext
+	spanKind              trace.SpanKind
+	startTime             time.Time
+	endTime               time.Time
+	attributes            []attribute.KeyValue
+	events                []Event
+	links                 []Link
+	status                Status
+	childSpanCount        int
+	droppedAttributeCount int
+	droppedEventCount     int
+	droppedLinkCount      int
+	resource              *resource.Resource
+	instrumentationScope  instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+	return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+	return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+	return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+	return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+	return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+	return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+	return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+	return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+	return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+	return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+	return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+	return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+	return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+	return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+	return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+	return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 00000000..730fb85c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,895 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"runtime"
+	rt "runtime/trace"
+	"slices"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+	// Name returns the name of the span.
+	Name() string
+	// SpanContext returns the unique SpanContext that identifies the span.
+	SpanContext() trace.SpanContext
+	// Parent returns the unique SpanContext that identifies the parent of the
+	// span if one exists. If the span has no parent the returned SpanContext
+	// will be invalid.
+	Parent() trace.SpanContext
+	// SpanKind returns the role the span plays in a Trace.
+	SpanKind() trace.SpanKind
+	// StartTime returns the time the span started recording.
+	StartTime() time.Time
+	// EndTime returns the time the span stopped recording. It will be zero if
+	// the span has not ended.
+	EndTime() time.Time
+	// Attributes returns the defining attributes of the span.
+	// The order of the returned attributes is not guaranteed to be stable across invocations.
+	Attributes() []attribute.KeyValue
+	// Links returns all the links the span has to other spans.
+	Links() []Link
+	// Events returns all the events that occurred within the span's
+	// lifetime.
+	Events() []Event
+	// Status returns the span's status.
+	Status() Status
+	// InstrumentationScope returns information about the instrumentation
+	// scope that created the span.
+	InstrumentationScope() instrumentation.Scope
+	// InstrumentationLibrary returns information about the instrumentation
+	// library that created the span.
+	// Deprecated: please use InstrumentationScope instead.
+	InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility
+	// Resource returns information about the entity that produced the span.
+	Resource() *resource.Resource
+	// DroppedAttributes returns the number of attributes dropped by the span
+	// due to limits being reached.
+	DroppedAttributes() int
+	// DroppedLinks returns the number of links dropped by the span due to
+	// limits being reached.
+	DroppedLinks() int
+	// DroppedEvents returns the number of events dropped by the span due to
+	// limits being reached.
+	DroppedEvents() int
+	// ChildSpanCount returns the count of spans that consider the span a
+	// direct parent.
+ ChildSpanCount() int + + // A private method to prevent users implementing the + // interface and so future additions to it will not + // violate compatibility. + private() +} + +// ReadWriteSpan exposes the same methods as trace.Span and in addition allows +// reading information from the underlying data structure. +// This interface exposes the union of the methods of trace.Span (which is a +// "write-only" span) and ReadOnlySpan. New methods for writing or reading span +// information should be added under trace.Span or ReadOnlySpan, respectively. +// +// Warning: methods may be added to this interface in minor releases. +type ReadWriteSpan interface { + trace.Span + ReadOnlySpan +} + +// recordingSpan is an implementation of the OpenTelemetry Span API +// representing the individual component of a trace that is sampled. +type recordingSpan struct { + embedded.Span + + // mu protects the contents of this span. + mu sync.Mutex + + // parent holds the parent span of this span as a trace.SpanContext. + parent trace.SpanContext + + // spanKind represents the kind of this span as a trace.SpanKind. + spanKind trace.SpanKind + + // name is the name of this span. + name string + + // startTime is the time at which this span was started. + startTime time.Time + + // endTime is the time at which this span was ended. It contains the zero + // value of time.Time until the span is ended. + endTime time.Time + + // status is the status of this span. + status Status + + // childSpanCount holds the number of child spans created for this span. + childSpanCount int + + // spanContext holds the SpanContext of this span. + spanContext trace.SpanContext + + // attributes is a collection of user provided key/values. The collection + // is constrained by a configurable maximum held by the parent + // TracerProvider. When additional attributes are added after this maximum + // is reached these attributes the user is attempting to add are dropped. + // This dropped number of attributes is tracked and reported in the + // ReadOnlySpan exported when the span ends. + attributes []attribute.KeyValue + droppedAttributes int + logDropAttrsOnce sync.Once + + // events are stored in FIFO queue capped by configured limit. + events evictedQueue[Event] + + // links are stored in FIFO queue capped by configured limit. + links evictedQueue[Link] + + // executionTracerTaskEnd ends the execution tracer span. + executionTracerTaskEnd func() + + // tracer is the SDK tracer that created this span. + tracer *tracer +} + +var ( + _ ReadWriteSpan = (*recordingSpan)(nil) + _ runtimeTracer = (*recordingSpan)(nil) +) + +// SpanContext returns the SpanContext of this span. +func (s *recordingSpan) SpanContext() trace.SpanContext { + if s == nil { + return trace.SpanContext{} + } + return s.spanContext +} + +// IsRecording returns if this span is being recorded. If this span has ended +// this will return false. +func (s *recordingSpan) IsRecording() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + + return s.isRecording() +} + +// isRecording returns if this span is being recorded. If this span has ended +// this will return false. +// +// This method assumes s.mu.Lock is held by the caller. +func (s *recordingSpan) isRecording() bool { + if s == nil { + return false + } + return s.endTime.IsZero() +} + +// SetStatus sets the status of the Span in the form of a code and a +// description, overriding previous values set. 
The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+	if s.status.Code > code {
+		return
+	}
+
+	status := Status{Code: code}
+	if code == codes.Error {
+		status.Description = description
+	}
+
+	s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum number of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+	if s == nil || len(attributes) == 0 {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
+	limit := s.tracer.provider.spanLimits.AttributeCountLimit
+	if limit == 0 {
+		// No attributes allowed.
+		s.addDroppedAttr(len(attributes))
+		return
+	}
+
+	// If adding these attributes could exceed the capacity of s perform a
+	// de-duplication and truncation while adding to avoid over allocation.
+	if limit > 0 && len(s.attributes)+len(attributes) > limit {
+		s.addOverCapAttrs(limit, attributes)
+		return
+	}
+
+	// Otherwise, add without deduplication. When attributes are read they
+	// will be deduplicated, optimizing the operation.
+	s.attributes = slices.Grow(s.attributes, len(attributes))
+	for _, a := range attributes {
+		if !a.Valid() {
+			// Drop all invalid attributes.
+			s.addDroppedAttr(1)
+			continue
+		}
+		a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+		s.attributes = append(s.attributes, a)
+	}
+}
+
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+	global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+	s.droppedAttributes += incr
+	s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+	// In order to not allocate more capacity to s.attributes than needed,
+	// prune and truncate this addition of attributes while adding.
+
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int, len(s.attributes)) + s.dedupeAttrsFromRecord(exists) + + // Now that s.attributes is deduplicated, adding unique attributes up to + // the capacity of s will not over allocate s.attributes. + + // max size = limit + maxCap := min(len(attrs)+len(s.attributes), limit) + if cap(s.attributes) < maxCap { + s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes)) + } + for _, a := range attrs { + if !a.Valid() { + // Drop all invalid attributes. + s.addDroppedAttr(1) + continue + } + + if idx, ok := exists[a.Key]; ok { + // Perform all updates before dropping, even when at capacity. + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) + s.attributes[idx] = a + continue + } + + if len(s.attributes) >= limit { + // Do not just drop all of the remaining attributes, make sure + // updates are checked and performed. + s.addDroppedAttr(1) + } else { + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) + s.attributes = append(s.attributes, a) + exists[a.Key] = len(s.attributes) - 1 + } + } +} + +// truncateAttr returns a truncated version of attr. Only string and string +// slice attribute values are truncated. String values are truncated to at +// most a length of limit. Each string slice value is truncated in this fashion +// (the slice length itself is unaffected). +// +// No truncation is performed for a negative limit. +func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + if limit < 0 { + return attr + } + switch attr.Value.Type() { + case attribute.STRING: + if v := attr.Value.AsString(); len(v) > limit { + return attr.Key.String(safeTruncate(v, limit)) + } + case attribute.STRINGSLICE: + v := attr.Value.AsStringSlice() + for i := range v { + if len(v[i]) > limit { + v[i] = safeTruncate(v[i], limit) + } + } + return attr.Key.StringSlice(v) + } + return attr +} + +// safeTruncate truncates the string and guarantees valid UTF-8 is returned. +func safeTruncate(input string, limit int) string { + if trunc, ok := safeTruncateValidUTF8(input, limit); ok { + return trunc + } + trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) + return trunc +} + +// safeTruncateValidUTF8 returns a copy of the input string safely truncated to +// limit. The truncation is ensured to occur at the bounds of complete UTF-8 +// characters. If invalid encoding of UTF-8 is encountered, input is returned +// with false, otherwise, the truncated input will be returned with true. +func safeTruncateValidUTF8(input string, limit int) (string, bool) { + for cnt := 0; cnt <= limit; { + r, size := utf8.DecodeRuneInString(input[cnt:]) + if r == utf8.RuneError { + return input, false + } + + if cnt+size > limit { + return input[:cnt], true + } + cnt += size + } + return input, true +} + +// End ends the span. This method does nothing if the span is already ended or +// is not being recorded. +// +// The only SpanOption currently supported is WithTimestamp which will set the +// end time for a Span's life-cycle. +// +// If this method is called while panicking an error event is added to the +// Span before ending it and the panic is continued. +func (s *recordingSpan) End(options ...trace.SpanEndOption) { + // Do not start by checking if the span is being recorded which requires + // acquiring a lock. Make a minimal check that the span is not nil. 
+	if s == nil {
+		return
+	}
+
+	// Store the end time as soon as possible to avoid artificially increasing
+	// the span's duration in case some operation below takes a while.
+	et := monotonicEndTime(s.startTime)
+
+	// Lock the span now that we have an end time and see if we need to do any more processing.
+	s.mu.Lock()
+	if !s.isRecording() {
+		s.mu.Unlock()
+		return
+	}
+
+	config := trace.NewSpanEndConfig(options...)
+	if recovered := recover(); recovered != nil {
+		// Record but don't stop the panic.
+		defer panic(recovered)
+		opts := []trace.EventOption{
+			trace.WithAttributes(
+				semconv.ExceptionType(typeStr(recovered)),
+				semconv.ExceptionMessage(fmt.Sprint(recovered)),
+			),
+		}
+
+		if config.StackTrace() {
+			opts = append(opts, trace.WithAttributes(
+				semconv.ExceptionStacktrace(recordStackTrace()),
+			))
+		}
+
+		s.addEvent(semconv.ExceptionEventName, opts...)
+	}
+
+	if s.executionTracerTaskEnd != nil {
+		s.mu.Unlock()
+		s.executionTracerTaskEnd()
+		s.mu.Lock()
+	}
+
+	// Setting endTime to non-zero marks the span as ended and not recording.
+	if config.Timestamp().IsZero() {
+		s.endTime = et
+	} else {
+		s.endTime = config.Timestamp()
+	}
+	s.mu.Unlock()
+
+	sps := s.tracer.provider.getSpanProcessors()
+	if len(sps) == 0 {
+		return
+	}
+	snap := s.snapshot()
+	for _, sp := range sps {
+		sp.sp.OnEnd(snap)
+	}
+}
+
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions hence the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error; this method
+// does not change the Span status. If this span is not being recorded or err is nil
+// then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+	if s == nil || err == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+
+	opts = append(opts, trace.WithAttributes(
+		semconv.ExceptionType(typeStr(err)),
+		semconv.ExceptionMessage(err.Error()),
+	))
+
+	c := trace.NewEventConfig(opts...)
+	if c.StackTrace() {
+		opts = append(opts, trace.WithAttributes(
+			semconv.ExceptionStacktrace(recordStackTrace()),
+		))
+	}
+
+	s.addEvent(semconv.ExceptionEventName, opts...)
+}
+
+func typeStr(i interface{}) string {
+	t := reflect.TypeOf(i)
+	if t.PkgPath() == "" && t.Name() == "" {
+		// Likely a builtin type.
+		return t.String()
+	}
+	return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+	stackTrace := make([]byte, 2048)
+	n := runtime.Stack(stackTrace, false)
+
+	return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+	s.addEvent(name, o...)
+}
+
+// addEvent adds an event with the provided name and options.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+	c := trace.NewEventConfig(o...)
+	e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+	// Discard attributes over limit.
+	limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+	if limit == 0 {
+		// Drop all attributes.
+		e.DroppedAttributeCount = len(e.Attributes)
+		e.Attributes = nil
+	} else if limit > 0 && len(e.Attributes) > limit {
+		// Drop over capacity.
+		e.DroppedAttributeCount = len(e.Attributes) - limit
+		e.Attributes = e.Attributes[:limit]
+	}
+
+	s.events.add(e)
+}
+
+// SetName sets the name of this span. If this span is not being recorded then
+// this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+	if s == nil {
+		return
+	}
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if !s.isRecording() {
+		return
+	}
+	s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	s.dedupeAttrs()
+	return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+	// Do not set a capacity when creating this map. Benchmark testing has
+	// shown this to only add unused memory allocations in general use.
+	exists := make(map[attribute.Key]int, len(s.attributes))
+	s.dedupeAttrsFromRecord(exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
+	// Use the fact that slices share the same backing array.
+	unique := s.attributes[:0]
+	for _, a := range s.attributes {
+		if idx, ok := record[a.Key]; ok {
+			unique[idx] = a
+		} else {
+			unique = append(unique, a)
+			record[a.Key] = len(unique) - 1
+		}
+	}
+	// s.attributes have element types of attribute.KeyValue. These types are
+	// not pointers and they themselves do not contain pointer fields,
+	// therefore the duplicate values do not need to be zeroed for them to be
+	// garbage collected.
+	s.attributes = unique
+}
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if len(s.links.queue) == 0 {
+		return []Link{}
+	}
+	return s.links.copy()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event { + s.mu.Lock() + defer s.mu.Unlock() + if len(s.events.queue) == 0 { + return []Event{} + } + return s.events.copy() +} + +// Status returns the status of this span. +func (s *recordingSpan) Status() Status { + s.mu.Lock() + defer s.mu.Unlock() + return s.status +} + +// InstrumentationScope returns the instrumentation.Scope associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + +// InstrumentationLibrary returns the instrumentation.Library associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be define for backwards compatibility + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + +// Resource returns the Resource associated with the Tracer that created this +// span. +func (s *recordingSpan) Resource() *resource.Resource { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.provider.resource +} + +func (s *recordingSpan) AddLink(link trace.Link) { + if s == nil { + return + } + if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && + link.SpanContext.TraceState().Len() == 0 { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } + + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} + + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit + if limit == 0 { + // Drop all attributes. + l.DroppedAttributeCount = len(l.Attributes) + l.Attributes = nil + } else if limit > 0 && len(l.Attributes) > limit { + l.DroppedAttributeCount = len(l.Attributes) - limit + l.Attributes = l.Attributes[:limit] + } + + s.links.add(l) +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s *recordingSpan) DroppedAttributes() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.droppedAttributes +} + +// DroppedLinks returns the number of links dropped by the span due to limits +// being reached. +func (s *recordingSpan) DroppedLinks() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.links.droppedCount +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s *recordingSpan) DroppedEvents() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.events.droppedCount +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. +func (s *recordingSpan) ChildSpanCount() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.childSpanCount +} + +// TracerProvider returns a trace.TracerProvider that can be used to generate +// additional Spans on the same telemetry pipeline as the current Span. +func (s *recordingSpan) TracerProvider() trace.TracerProvider { + return s.tracer.provider +} + +// snapshot creates a read-only copy of the current state of the span. 
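+// The snapshot is taken while holding s.mu and is handed to the registered
+// SpanProcessors via OnEnd when the span ends (see End above).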
+func (s *recordingSpan) snapshot() ReadOnlySpan { + var sd snapshot + s.mu.Lock() + defer s.mu.Unlock() + + sd.endTime = s.endTime + sd.instrumentationScope = s.tracer.instrumentationScope + sd.name = s.name + sd.parent = s.parent + sd.resource = s.tracer.provider.resource + sd.spanContext = s.spanContext + sd.spanKind = s.spanKind + sd.startTime = s.startTime + sd.status = s.status + sd.childSpanCount = s.childSpanCount + + if len(s.attributes) > 0 { + s.dedupeAttrs() + sd.attributes = s.attributes + } + sd.droppedAttributeCount = s.droppedAttributes + if len(s.events.queue) > 0 { + sd.events = s.events.copy() + sd.droppedEventCount = s.events.droppedCount + } + if len(s.links.queue) > 0 { + sd.links = s.links.copy() + sd.droppedLinkCount = s.links.droppedCount + } + return &sd +} + +func (s *recordingSpan) addChild() { + if s == nil { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRecording() { + return + } + s.childSpanCount++ +} + +func (*recordingSpan) private() {} + +// runtimeTrace starts a "runtime/trace".Task for the span and returns a +// context containing the task. +func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + if !rt.IsEnabled() { + // Avoid additional overhead if runtime/trace is not enabled. + return ctx + } + nctx, task := rt.NewTask(ctx, s.name) + + s.mu.Lock() + s.executionTracerTaskEnd = task.End + s.mu.Unlock() + + return nctx +} + +// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API +// that wraps a SpanContext. It performs no operations other than to return +// the wrapped SpanContext or TracerProvider that created it. +type nonRecordingSpan struct { + embedded.Span + + // tracer is the SDK tracer that created this span. + tracer *tracer + sc trace.SpanContext +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (nonRecordingSpan) AddLink(trace.Link) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +// TracerProvider returns the trace.TracerProvider that provided the Tracer +// that created this span. +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } + +func isRecording(s SamplingResult) bool { + return s.Decision == RecordOnly || s.Decision == RecordAndSample +} + +func isSampled(s SamplingResult) bool { + return s.Decision == RecordAndSample +} + +// Status is the classified state of a Span. +type Status struct { + // Code is an identifier of a Spans state classification. + Code codes.Code + // Description is a user hint about why that status was set. It is only + // applicable when Code is Error. 
+ Description string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go new file mode 100644 index 00000000..6bdda3d9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "context" + +// SpanExporter handles the delivery of spans to external receivers. This is +// the final component in the trace export pipeline. +type SpanExporter interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // ExportSpans exports a batch of spans. + // + // This function is called synchronously, so there is no concurrency + // safety requirement. However, due to the synchronous calling pattern, + // it is critical that all timeouts and cancellations contained in the + // passed context must be honored. + // + // Any retry logic must be contained in this function. The SDK that + // calls this function will not implement any retry logic. All errors + // returned by this function are considered unrecoverable and will be + // reported to a configured error Handler. + ExportSpans(ctx context.Context, spans []ReadOnlySpan) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Shutdown notifies the exporter of a pending halt to operations. The + // exporter is expected to perform any cleanup or synchronization it + // requires while honoring all timeouts and cancellations contained in + // the passed context. + Shutdown(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go new file mode 100644 index 00000000..bec5e209 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "go.opentelemetry.io/otel/sdk/internal/env" + +const ( + // DefaultAttributeValueLengthLimit is the default maximum allowed + // attribute value length, unlimited. + DefaultAttributeValueLengthLimit = -1 + + // DefaultAttributeCountLimit is the default maximum number of attributes + // a span can have. + DefaultAttributeCountLimit = 128 + + // DefaultEventCountLimit is the default maximum number of events a span + // can have. + DefaultEventCountLimit = 128 + + // DefaultLinkCountLimit is the default maximum number of links a span can + // have. + DefaultLinkCountLimit = 128 + + // DefaultAttributePerEventCountLimit is the default maximum number of + // attributes a span event can have. + DefaultAttributePerEventCountLimit = 128 + + // DefaultAttributePerLinkCountLimit is the default maximum number of + // attributes a span link can have. + DefaultAttributePerLinkCountLimit = 128 +) + +// SpanLimits represents the limits of a span. +type SpanLimits struct { + // AttributeValueLengthLimit is the maximum allowed attribute value length. + // + // This limit only applies to string and string slice attribute values. 
+	// Any string longer than this value will be truncated to this length.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeValueLengthLimit int
+
+	// AttributeCountLimit is the maximum allowed span attribute count. Any
+	// attribute added to a span once this limit is reached will be dropped.
+	//
+	// Setting this to zero means no attributes will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributeCountLimit int
+
+	// EventCountLimit is the maximum allowed span event count. Any event
+	// added to a span once this limit is reached will still be added, but
+	// the oldest event will be dropped.
+	//
+	// Setting this to zero means no events will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	EventCountLimit int
+
+	// LinkCountLimit is the maximum allowed span link count. Any link added
+	// to a span once this limit is reached will still be added, but the
+	// oldest link will be dropped.
+	//
+	// Setting this to zero means no links will be recorded.
+	//
+	// Setting this to a negative value means no limit is applied.
+	LinkCountLimit int
+
+	// AttributePerEventCountLimit is the maximum number of attributes allowed
+	// per span event. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for events.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerEventCountLimit int
+
+	// AttributePerLinkCountLimit is the maximum number of attributes allowed
+	// per span link. Any attribute added after this limit is reached will be
+	// dropped.
+	//
+	// Setting this to zero means no attributes will be recorded for links.
+	//
+	// Setting this to a negative value means no limit is applied.
+	AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+	return SpanLimits{
+		AttributeValueLengthLimit:   env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+		AttributeCountLimit:         env.SpanAttributeCount(DefaultAttributeCountLimit),
+		EventCountLimit:             env.SpanEventCount(DefaultEventCountLimit),
+		LinkCountLimit:              env.SpanLinkCount(DefaultLinkCountLimit),
+		AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+		AttributePerLinkCountLimit:  env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 00000000..af7f9177
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider and are called at the
+// start and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnStart is called when a span is started. It is called synchronously
+	// and should not block.
+	OnStart(parent context.Context, s ReadWriteSpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// OnEnd is called when a span is finished. It is called synchronously
+	// and hence must not block.
+	OnEnd(s ReadOnlySpan)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Shutdown is called when the SDK shuts down. Any cleanup or release of
+	// resources held by the processor should be done in this call.
+	//
+	// Calls to OnStart, OnEnd, or ForceFlush after this has been called
+	// should be ignored.
+	//
+	// All timeouts and cancellations contained in ctx must be honored; this
+	// call should not block indefinitely.
+	Shutdown(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ForceFlush exports all ended spans to the configured Exporter that have not yet
+	// been exported. It should only be called when absolutely necessary, such as when
+	// using a FaaS provider that may suspend the process after an invocation, but before
+	// the Processor can export the completed spans.
+	ForceFlush(ctx context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+} + +type spanProcessorState struct { + sp SpanProcessor + state sync.Once +} + +func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { + return &spanProcessorState{sp: sp} +} + +type spanProcessorStates []*spanProcessorState diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go new file mode 100644 index 00000000..43419d3b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +type tracer struct { + embedded.Tracer + + provider *TracerProvider + instrumentationScope instrumentation.Scope +} + +var _ trace.Tracer = &tracer{} + +// Start starts a Span and returns it along with a context containing it. +// +// The Span is created with the provided name and as a child of any existing +// span context found in the passed context. The created Span will be +// configured appropriately by any SpanOption passed. +func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { + config := trace.NewSpanStartConfig(options...) + + if ctx == nil { + // Prevent trace.ContextWithSpan from panicking. + ctx = context.Background() + } + + // For local spans created by this SDK, track child span count. + if p := trace.SpanFromContext(ctx); p != nil { + if sdkSpan, ok := p.(*recordingSpan); ok { + sdkSpan.addChild() + } + } + + s := tr.newSpan(ctx, name, &config) + if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { + sps := tr.provider.getSpanProcessors() + for _, sp := range sps { + sp.sp.OnStart(ctx, rw) + } + } + if rtt, ok := s.(runtimeTracer); ok { + ctx = rtt.runtimeTrace(ctx) + } + + return trace.ContextWithSpan(ctx, s), s +} + +type runtimeTracer interface { + // runtimeTrace starts a "runtime/trace".Task for the span and + // returns a context containing the task. + runtimeTrace(ctx context.Context) context.Context +} + +// newSpan returns a new configured span. +func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span { + // If told explicitly to make this a new root use a zero value SpanContext + // as a parent which contains an invalid trace ID and is not remote. + var psc trace.SpanContext + if config.NewRoot() { + ctx = trace.ContextWithSpanContext(ctx, psc) + } else { + psc = trace.SpanContextFromContext(ctx) + } + + // If there is a valid parent trace ID, use it to ensure the continuity of + // the trace. Always generate a new span ID so other components can rely + // on a unique span ID, even if the Span is non-recording. 
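A hedged wiring sketch (not part of the vendored code) showing how the pieces introduced above, SpanLimits, a SpanProcessor registered via WithBatcher, and a sampler, come together on a TracerProvider; `stdoutExporter` is the hypothetical exporter from the earlier sketch:

```go
package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func newTracerProvider() *sdktrace.TracerProvider {
	// Start from the environment-derived limits, then tighten one of them.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeCountLimit = 64

	return sdktrace.NewTracerProvider(
		sdktrace.WithSpanLimits(limits),
		// Sample 10% of root traces; children follow their parent's decision.
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.1))),
		// WithBatcher registers a BatchSpanProcessor wrapping the exporter,
		// so the OnEnd/ForceFlush/Shutdown calls described above are driven
		// by the provider.
		sdktrace.WithBatcher(&stdoutExporter{}),
	)
}

func main() {
	tp := newTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "demo")
	span.End()
}
```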
+	var tid trace.TraceID
+	var sid trace.SpanID
+	if !psc.TraceID().IsValid() {
+		tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+	} else {
+		tid = psc.TraceID()
+		sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+	}
+
+	samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+		ParentContext: ctx,
+		TraceID:       tid,
+		Name:          name,
+		Kind:          config.SpanKind(),
+		Attributes:    config.Attributes(),
+		Links:         config.Links(),
+	})
+
+	scc := trace.SpanContextConfig{
+		TraceID:    tid,
+		SpanID:     sid,
+		TraceState: samplingResult.Tracestate,
+	}
+	if isSampled(samplingResult) {
+		scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+	} else {
+		scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+	}
+	sc := trace.NewSpanContext(scc)
+
+	if !isRecording(samplingResult) {
+		return tr.newNonRecordingSpan(sc)
+	}
+	return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+	startTime := config.Timestamp()
+	if startTime.IsZero() {
+		startTime = time.Now()
+	}
+
+	s := &recordingSpan{
+		// Do not pre-allocate the attributes slice here! Doing so will
+		// allocate memory that is likely never going to be used, or if used,
+		// will be over-sized. The default Go compiler has been tested to
+		// dynamically allocate needed space very well. Benchmarking has shown
+		// it to be more performant than what we can predetermine here,
+		// especially for the common use case of few to no added
+		// attributes.
+
+		parent:      psc,
+		spanContext: sc,
+		spanKind:    trace.ValidateSpanKind(config.SpanKind()),
+		name:        name,
+		startTime:   startTime,
+		events:      newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
+		links:       newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
+		tracer:      tr,
+	}
+
+	for _, l := range config.Links() {
+		s.AddLink(l)
+	}
+
+	s.SetAttributes(sr.Attributes...)
+	s.SetAttributes(config.Attributes()...)
+
+	return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+	return nonRecordingSpan{tracer: tr, sc: sc}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 00000000..b84dd2c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+	return "1.16.0-rc.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 00000000..dc1eaa8e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
+func Version() string { + return "1.31.0" +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md new file mode 100644 index 00000000..87b842c5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.17.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go new file mode 100644 index 00000000..e087c9c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go similarity index 63% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go index 6c019aaf..c7b804bb 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go @@ -3,65 +3,10 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" import "go.opentelemetry.io/otel/attribute" -// This event represents an occurrence of a lifecycle transition on the iOS -// platform. -const ( - // IosStateKey is the attribute Key conforming to the "ios.state" semantic - // conventions. It represents the this attribute represents the state the - // application has transitioned into at the occurrence of the event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate - // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), - // and from which the `OS terminology` column values are derived. - IosStateKey = attribute.Key("ios.state") -) - -var ( - // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` - IosStateActive = IosStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` - IosStateInactive = IosStateKey.String("inactive") - // The app is now in the background. This value is associated with UIKit notification `applicationDidEnterBackground` - IosStateBackground = IosStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` - IosStateForeground = IosStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` - IosStateTerminate = IosStateKey.String("terminate") -) - -// This event represents an occurrence of a lifecycle transition on the Android -// platform. 
-const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the this attribute represents the - // state the application has transitioned into at the occurrence of the - // event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. - AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - // This semantic convention defines the attributes used to represent a feature // flag evaluation as an event. const ( @@ -71,7 +16,7 @@ const ( // // Type: string // RequirementLevel: Required - // Stability: experimental + // Stability: stable // Examples: 'logo-color' FeatureFlagKeyKey = attribute.Key("feature_flag.key") @@ -81,7 +26,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: experimental + // Stability: stable // Examples: 'Flag Manager' FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") @@ -92,7 +37,7 @@ const ( // // Type: string // RequirementLevel: Recommended - // Stability: experimental + // Stability: stable // Examples: 'red', 'true', 'on' // Note: A semantic identifier, commonly referred to as a variant, provides // a means @@ -130,14 +75,14 @@ func FeatureFlagVariant(val string) attribute.KeyValue { // RPC received/sent message. const ( - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. + // MessageTypeKey is the attribute Key conforming to the "message.type" + // semantic conventions. It represents the whether this is a received or + // sent message. // - // Type: int + // Type: Enum // RequirementLevel: Optional - // Stability: experimental - MessageCompressedSizeKey = attribute.Key("message.compressed_size") + // Stability: stable + MessageTypeKey = attribute.Key("message.type") // MessageIDKey is the attribute Key conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two @@ -146,19 +91,19 @@ const ( // // Type: int // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Note: This way we guarantee that the values will be consistent between // different implementations. MessageIDKey = attribute.Key("message.id") - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. + // MessageCompressedSizeKey is the attribute Key conforming to the + // "message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
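As a usage sketch for the feature-flag attributes above (illustrative, not part of the vendored file; the `feature_flag` event name follows the spec's convention, and the tracer name is an assumption), an evaluation can be recorded as a span event:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

func recordFlagEvaluation(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "evaluate-flag")
	defer span.End()

	// Attach the feature flag evaluation as a span event using the
	// v1.17.0 semantic convention helpers defined in this file.
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}
```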
// - // Type: Enum + // Type: int // RequirementLevel: Optional - // Stability: experimental - MessageTypeKey = attribute.Key("message.type") + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") // MessageUncompressedSizeKey is the attribute Key conforming to the // "message.uncompressed_size" semantic conventions. It represents the @@ -166,7 +111,7 @@ const ( // // Type: int // RequirementLevel: Optional - // Stability: experimental + // Stability: stable MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") ) @@ -177,13 +122,6 @@ var ( MessageTypeReceived = MessageTypeKey.String("RECEIVED") ) -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - // MessageID returns an attribute KeyValue conforming to the "message.id" // semantic conventions. It represents the mUST be calculated as two different // counters starting from `1` one for sent messages and one for received @@ -192,9 +130,59 @@ func MessageID(val int) attribute.KeyValue { return MessageIDKey.Int(val) } +// MessageCompressedSize returns an attribute KeyValue conforming to the +// "message.compressed_size" semantic conventions. It represents the compressed +// size of the message in bytes. +func MessageCompressedSize(val int) attribute.KeyValue { + return MessageCompressedSizeKey.Int(val) +} + // MessageUncompressedSize returns an attribute KeyValue conforming to the // "message.uncompressed_size" semantic conventions. It represents the // uncompressed size of the message in bytes. func MessageUncompressedSize(val int) attribute.KeyValue { return MessageUncompressedSizeKey.Int(val) } + +// The attributes used to report a single exception associated with a span. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It represents the sHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example above](#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. 
It represents the sHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 00000000..137acc67 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 00000000..d318221e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go similarity index 66% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go index d66bbe9c..7e365e82 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -3,119 +3,204 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" import "go.opentelemetry.io/otel/attribute" -// A cloud environment (e.g. GCP, Azure, AWS). +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. 
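A short, hedged sketch of how ExceptionEventName and ExceptionEscaped above are typically combined; the raw "exception.message" key is written out directly here rather than relying on other helpers from the package:

```go
package main

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// recordEscapedException records err on span just before the span ends,
// marking the exception as escaping per the note on ExceptionEscapedKey.
func recordEscapedException(span trace.Span, err error) {
	span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
		attribute.String("exception.message", err.Error()),
		semconv.ExceptionEscaped(true),
	))
}
```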
It represents the platform on + // which the browser is running // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserUserAgentKey is the attribute Key conforming to the + // "browser.user_agent" semantic conventions. It represents the full + // user-agent string provided by the browser // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) + // AppleWebKit/537.36 (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do + // not have a mechanism to retrieve brands and platform individually from + // the User-Agent Client Hints API. To retrieve the value, the legacy + // `navigator.userAgent` API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser // - // Type: Enum + // Type: string // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserUserAgent returns an attribute KeyValue conforming to the +// "browser.user_agent" semantic conventions. It represents the full user-agent +// string provided by the browser +func BrowserUserAgent(val string) attribute.KeyValue { + return BrowserUserAgentKey.String(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} +// A cloud environment (e.g. GCP, Azure, AWS) +const ( // CloudProviderKey is the attribute Key conforming to the "cloud.provider" // semantic conventions. It represents the name of the cloud provider. // // Type: Enum // RequirementLevel: Optional - // Stability: experimental + // Stability: stable CloudProviderKey = attribute.Key("cloud.provider") + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // CloudRegionKey is the attribute Key conforming to the "cloud.region" // semantic conventions. It represents the geographical region the resource // is running. // // Type: string // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: 'us-central1', 'us-east-1' // Note: Refer to your provider's docs to see the available regions, for // example [Alibaba Cloud // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), // [Google Cloud regions](https://cloud.google.com/about/locations), or // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). + // regions](https://intl.cloud.tencent.com/document/product/213/6091). CloudRegionKey = attribute.Key("cloud.region") - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. 
It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) + // CloudAvailabilityZoneKey is the attribute Key conforming to the + // "cloud.availability_zone" semantic conventions. It represents the cloud + // regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the + // resource is running. // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google + // Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + + // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" + // semantic conventions. It represents the cloud platform in use. // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. - CloudResourceIDKey = attribute.Key("cloud.resource_id") + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. 
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") ) var ( @@ -151,8 +236,6 @@ var ( CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") // Azure Red Hat OpenShift CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") // Google Cloud Compute Engine (GCE) CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") // Google Cloud Run @@ -164,7 +247,7 @@ var ( // Google Cloud App Engine (GAE) CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") // Red Hat OpenShift on IBM Cloud CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") // Tencent Cloud Cloud Virtual Machine (CVM) @@ -175,23 +258,6 @@ var ( CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") ) -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - // CloudAccountID returns an attribute KeyValue conforming to the // "cloud.account.id" semantic conventions. It represents the cloud account ID // the resource is assigned to. @@ -199,15 +265,6 @@ func CloudAccountID(val string) attribute.KeyValue { return CloudAccountIDKey.String(val) } -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - // CloudRegion returns an attribute KeyValue conforming to the // "cloud.region" semantic conventions. It represents the geographical region // the resource is running. @@ -215,171 +272,289 @@ func CloudRegion(val string) attribute.KeyValue { return CloudRegionKey.String(val) } -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. 
an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) } -// A container instance. +// Resources used by AWS Elastic Container Service (ECS). const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. // - // Type: string + // Type: Enum // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). // // Type: string // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assinged by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. // - // Type: string[] + // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). 
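To show where these ECS attributes end up in practice, a hedged sketch (not part of the patch) that folds them into an SDK resource; the ARN and family values are the documentation examples, and semconv.SchemaURL is assumed to come from the same package:

```go
package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// ecsResource describes a workload running on ECS using the attribute
// helpers defined in this file.
func ecsResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudRegion("us-west-2"),
		semconv.AWSECSLaunchtypeFargate,
		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
		semconv.AWSECSTaskFamily("opentelemetry-family"),
	)
}
```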
+const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. +const ( + // AWSLogGroupNamesKey is the attribute Key conforming to the + // "aws.log.group.names" semantic conventions. It represents the name(s) of + // the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like + // multi-container applications, where a single application has sidecar + // containers, and each write to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + + // AWSLogGroupARNsKey is the attribute Key conforming to the + // "aws.log.group.arns" semantic conventions. It represents the Amazon + // Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + + // AWSLogStreamNamesKey is the attribute Key conforming to the + // "aws.log.stream.names" semantic conventions. It represents the name(s) + // of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + + // AWSLogStreamARNsKey is the attribute Key conforming to the + // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of + // the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). + // One log group can contain several log streams, so these ARNs necessarily + // identify both a log group and a log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// AWSLogGroupNames returns an attribute KeyValue conforming to the +// "aws.log.group.names" semantic conventions. It represents the name(s) of the +// AWS log group(s) an application is writing to. +func AWSLogGroupNames(val ...string) attribute.KeyValue { + return AWSLogGroupNamesKey.StringSlice(val) +} + +// AWSLogGroupARNs returns an attribute KeyValue conforming to the +// "aws.log.group.arns" semantic conventions. 
It represents the Amazon Resource +// Name(s) (ARN) of the AWS log group(s). +func AWSLogGroupARNs(val ...string) attribute.KeyValue { + return AWSLogGroupARNsKey.StringSlice(val) +} + +// AWSLogStreamNames returns an attribute KeyValue conforming to the +// "aws.log.stream.names" semantic conventions. It represents the name(s) of +// the AWS log stream(s) an application is writing to. +func AWSLogStreamNames(val ...string) attribute.KeyValue { + return AWSLogStreamNamesKey.StringSlice(val) +} + +// AWSLogStreamARNs returns an attribute KeyValue conforming to the +// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the +// AWS log stream(s). +func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} +// A container instance. +const ( // ContainerNameKey is the attribute Key conforming to the "container.name" // semantic conventions. It represents the container name used by container // runtime. // // Type: string // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: 'opentelemetry-autoconf' ContainerNameKey = attribute.Key("container.name") + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // ContainerRuntimeKey is the attribute Key conforming to the // "container.runtime" semantic conventions. It represents the container // runtime managing this container. // // Type: string // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: 'docker', 'containerd', 'rkt' ContainerRuntimeKey = attribute.Key("container.runtime") -) -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. 
It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) } // ContainerID returns an attribute KeyValue conforming to the @@ -391,11 +566,11 @@ func ContainerID(val string) attribute.KeyValue { return ContainerIDKey.String(val) } -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) } // ContainerImageName returns an attribute KeyValue conforming to the @@ -405,45 +580,45 @@ func ContainerImageName(val string) attribute.KeyValue { return ContainerImageNameKey.String(val) } -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. -func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) } -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. 
-func ContainerRuntime(val string) attribute.KeyValue {
-	return ContainerRuntimeKey.String(val)
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
 }
 
-// Describes device attributes.
+// The device on which the process represented by this resource is running.
 const (
 	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
 	// conventions. It represents a unique identifier representing the device
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
 	// Note: The device identifier MUST only be defined using the values
 	// outlined below. This value is not an advertising identifier and MUST NOT
@@ -460,28 +635,15 @@ const (
 	// diligence.
 	DeviceIDKey = attribute.Key("device.id")
 
-	// DeviceManufacturerKey is the attribute Key conforming to the
-	// "device.manufacturer" semantic conventions. It represents the name of
-	// the device manufacturer
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'Apple', 'Samsung'
-	// Note: The Android OS provides this field via
-	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
-	// iOS apps SHOULD hardcode the value `Apple`.
-	DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
 	// DeviceModelIdentifierKey is the attribute Key conforming to the
 	// "device.model.identifier" semantic conventions. It represents the model
 	// identifier for the device
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'iPhone3,4', 'SM-G920F'
-	// Note: It's recommended this value represents a machine-readable version
+	// Note: It's recommended this value represents a machine readable version
 	// of the model identifier rather than the market or consumer-friendly name
 	// of the device.
 	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
@@ -492,11 +654,24 @@ const (
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
-	// Note: It's recommended this value represents a human-readable version of
-	// the device model rather than a machine-readable alternative.
+	// Note: It's recommended this value represents a human readable version of
+	// the device model rather than a machine readable alternative.
 	DeviceModelNameKey = attribute.Key("device.model.name")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of
+	// the device manufacturer
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
 )
 
 // DeviceID returns an attribute KeyValue conforming to the "device.id"
@@ -506,13 +681,6 @@ func DeviceID(val string) attribute.KeyValue {
 	return DeviceIDKey.String(val)
 }
 
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
-	return DeviceManufacturerKey.String(val)
-}
-
 // DeviceModelIdentifier returns an attribute KeyValue conforming to the
 // "device.model.identifier" semantic conventions. It represents the model
 // identifier for the device
@@ -527,172 +695,237 @@ func DeviceModelName(val string) attribute.KeyValue {
 	return DeviceModelNameKey.String(val)
 }
 
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches or disk array.
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
 const (
-	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
-	// conventions. It represents the CPU architecture the host system is
-	// running on.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	HostArchKey = attribute.Key("host.arch")
-
-	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
-	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
-	// of level 2 memory cache available to the processor (in Bytes).
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
 	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 12288000
-	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `faas.id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
 
-	// HostCPUFamilyKey is the attribute Key conforming to the
-	// "host.cpu.family" semantic conventions. It represents the family or
-	// generation of the CPU.
+	// FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+	// conventions. It represents the unique ID of the single function that
+	// this runtime instance executes.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '6', 'PA-RISC 1.1e'
-	HostCPUFamilyKey = attribute.Key("host.cpu.family")
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so consider setting `faas.id` as a span attribute instead.
+	//
+	// The exact value to use for `faas.id` depends on the cloud provider:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	// Take care not to use the "invoked ARN" directly but replace any
+	// [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	// with the resolved function version, as the same runtime instance may
+	// be invokable with
+	// multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	// *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider.
+	FaaSIDKey = attribute.Key("faas.id")
 
-	// HostCPUModelIDKey is the attribute Key conforming to the
-	// "host.cpu.model.id" semantic conventions. It represents the model
-	// identifier. It provides more granular information about the CPU,
-	// distinguishing it from other CPUs within the same family.
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '6', '9000/778/B180L'
-	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+	// Stability: stable
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	// (an integer represented as a decimal string).
+	// * **Google Cloud Run:** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	// (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	// [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
 
-	// HostCPUModelNameKey is the attribute Key conforming to the
-	// "host.cpu.model.name" semantic conventions. It represents the model
-	// designation of the processor.
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, that will be potentially reused for other invocations to the
+	// same function/function version.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
-	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+	// Stability: stable
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
 
-	// HostCPUSteppingKey is the attribute Key conforming to the
-	// "host.cpu.stepping" semantic conventions. It represents the stepping or
-	// core revisions.
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function in MiB.
 	//
 	// Type: int
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 1
-	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+	// Stability: stable
+	// Examples: 128
+	// Note: It's recommended to set this attribute since e.g. too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
 
-	// HostCPUVendorIDKey is the attribute Key conforming to the
-	// "host.cpu.vendor.id" semantic conventions. It represents the processor
-	// manufacturer identifier. A maximum 12-character string.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'GenuineIntel'
-	// Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
-	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
-	// order results in a 12-character string.
-	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic
+// conventions. It represents the unique ID of the single function that this
+// runtime instance executes.
+func FaaSID(val string) attribute.KeyValue {
+	return FaaSIDKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// A host is defined as a general computing instance.
+const (
 	// HostIDKey is the attribute Key conforming to the "host.id" semantic
 	// conventions. It represents the unique host ID. For Cloud, this must be
 	// the instance_id assigned by the cloud provider. For non-containerized
-	// systems, this should be the `machine-id`. See the table below for the
-	// sources to use to determine the `machine-id` based on operating system.
+	// Linux systems, the `machine-id` located in `/etc/machine-id` or
+	// `/var/lib/dbus/machine-id` may be used.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
 	HostIDKey = attribute.Key("host.id")
 
-	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
-	// semantic conventions. It represents the VM image ID or host OS image ID.
-	// For Cloud, this value is from the provider.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'ami-07b06b442921831e5'
-	HostImageIDKey = attribute.Key("host.image.id")
-
-	// HostImageNameKey is the attribute Key conforming to the
-	// "host.image.name" semantic conventions. It represents the name of the VM
-	// image or OS install the host was instantiated from.
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
-	HostImageNameKey = attribute.Key("host.image.name")
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
 
-	// HostImageVersionKey is the attribute Key conforming to the
-	// "host.image.version" semantic conventions. It represents the version
-	// string of the VM image or host OS as defined in [Version
-	// Attributes](/docs/resource/README.md#version-attributes).
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '0.1'
-	HostImageVersionKey = attribute.Key("host.image.version")
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
 
-	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
-	// conventions. It represents the available IP addresses of the host,
-	// excluding loopback interfaces.
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
 	//
-	// Type: string[]
+	// Type: Enum
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
-	// Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
-	// addresses MUST be specified in the [RFC
-	// 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
-	HostIPKey = attribute.Key("host.ip")
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
 
-	// HostMacKey is the attribute Key conforming to the "host.mac" semantic
-	// conventions. It represents the available MAC addresses of the host,
-	// excluding loopback interfaces.
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
 	//
-	// Type: string[]
+	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
-	// Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
-	// form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
-	// as hyphen-separated octets in uppercase hexadecimal form from most to
-	// least significant.
-	HostMacKey = attribute.Key("host.mac")
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
 
-	// HostNameKey is the attribute Key conforming to the "host.name" semantic
-	// conventions. It represents the name of the host. On Unix systems, it may
-	// contain what the hostname command returns, or the fully qualified
-	// hostname, or another name specified by the user.
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+	// value is from the provider.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-test'
-	HostNameKey = attribute.Key("host.name")
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
 
-	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
-	// conventions. It represents the type of host. For Cloud, this must be the
-	// machine type.
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image as defined in [Version
+	// Attributes](README.md#version-attributes).
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'n1-standard-1'
-	HostTypeKey = attribute.Key("host.type")
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
 )
 
 var (
@@ -714,94 +947,15 @@ var (
 	HostArchX86 = HostArchKey.String("x86")
 )
 
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
-	return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
-	return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
-	return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
-	return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val int) attribute.KeyValue {
-	return HostCPUSteppingKey.Int(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
-	return HostCPUVendorIDKey.String(val)
-}
-
 // HostID returns an attribute KeyValue conforming to the "host.id" semantic
 // conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
 func HostID(val string) attribute.KeyValue {
 	return HostIDKey.String(val)
 }
 
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the VM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
-	return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
-	return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
-	return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
-	return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
-	return HostMacKey.StringSlice(val)
-}
-
 // HostName returns an attribute KeyValue conforming to the "host.name"
 // semantic conventions. It represents the name of the host. On Unix systems,
 // it may contain what the hostname command returns, or the fully qualified
@@ -817,7 +971,29 @@ func HostType(val string) attribute.KeyValue {
 	return HostTypeKey.String(val)
 }
 
-// Kubernetes resource attributes.
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
 const (
 	// K8SClusterNameKey is the attribute Key conforming to the
 	// "k8s.cluster.name" semantic conventions. It represents the name of the
@@ -825,416 +1001,375 @@ const (
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'opentelemetry-cluster'
 	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
 
-	// K8SClusterUIDKey is the attribute Key conforming to the
-	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
-	// the cluster, set to the UID of the `kube-system` namespace.
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
-	// Note: K8S doesn't have support for obtaining a cluster ID. If this is
-	// ever
-	// added, we will recommend collecting the `k8s.cluster.uid` through the
-	// official APIs. In the meantime, we are able to use the `uid` of the
-	// `kube-system` namespace as a proxy for cluster ID. Read on for the
-	// rationale.
-	//
-	// Every object created in a K8S cluster is assigned a distinct UID. The
-	// `kube-system` namespace is used by Kubernetes itself and will exist
-	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
-	// namespace is a reasonable proxy for the K8S ClusterID as it will only
-	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
-	// UUIDs as standardized by
-	// [ISO/IEC 9834-8 and ITU-T
-	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
-	// Which states:
-	//
-	// > If generated according to one of the mechanisms defined in Rec.
-	// ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
-	// different from all other UUIDs generated before 3603 A.D., or is
-	// extremely likely to be different (depending on the mechanism chosen).
-	//
-	// Therefore, UIDs between clusters should be extremely unlikely to
-	// conflict.
-	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+	// Stability: stable
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
 
-	// K8SContainerNameKey is the attribute Key conforming to the
-	// "k8s.container.name" semantic conventions. It represents the name of the
-	// Container from Pod specification, must be unique within a Pod. Container
-	// runtime usually uses different globally unique name (`container.name`).
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'redis'
-	K8SContainerNameKey = attribute.Key("k8s.container.name")
+	// Stability: stable
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
 
-	// K8SContainerRestartCountKey is the attribute Key conforming to the
-	// "k8s.container.restart_count" semantic conventions. It represents the
-	// number of times the container was restarted. This attribute can be used
-	// to identify a particular container (running or stopped) within a
-	// container spec.
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 0, 2
-	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
 
-	// K8SCronJobNameKey is the attribute Key conforming to the
-	// "k8s.cronjob.name" semantic conventions. It represents the name of the
-	// CronJob.
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+	// Stability: stable
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
 
-	// K8SCronJobUIDKey is the attribute Key conforming to the
-	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-	// CronJob.
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
 
-	// K8SDaemonSetNameKey is the attribute Key conforming to the
-	// "k8s.daemonset.name" semantic conventions. It represents the name of the
-	// DaemonSet.
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+	// Stability: stable
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
 
-	// K8SDaemonSetUIDKey is the attribute Key conforming to the
-	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-	// DaemonSet.
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from Pod specification, must be unique within a Pod. Container
+	// runtime usually uses different globally unique name (`container.name`).
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+	// Stability: stable
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
 
-	// K8SDeploymentNameKey is the attribute Key conforming to the
-	// "k8s.deployment.name" semantic conventions. It represents the name of
-	// the Deployment.
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
 	//
-	// Type: string
+	// Type: int
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry'
-	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+	// Stability: stable
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
 
-	// K8SDeploymentUIDKey is the attribute Key conforming to the
-	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-	// Deployment.
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
 
-	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
-	// semantic conventions. It represents the name of the Job.
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'opentelemetry'
-	K8SJobNameKey = attribute.Key("k8s.job.name")
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
 
-	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
-	// semantic conventions. It represents the UID of the Job.
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
 
-	// K8SNamespaceNameKey is the attribute Key conforming to the
-	// "k8s.namespace.name" semantic conventions. It represents the name of the
-	// namespace that the pod is running in.
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of
+	// the Deployment.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'default'
-	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
 
-	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
-	// semantic conventions. It represents the name of the Node.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'node-1'
-	K8SNodeNameKey = attribute.Key("k8s.node.name")
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+	return K8SDeploymentUIDKey.String(val)
+}
 
-	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
-	// semantic conventions. It represents the UID of the Node.
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
-	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
 
-	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
-	// semantic conventions. It represents the name of the Pod.
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'opentelemetry-pod-autoconf'
-	K8SPodNameKey = attribute.Key("k8s.pod.name")
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
 
-	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
-	// semantic conventions. It represents the UID of the Pod.
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+	// K8SDaemonSetUIDKey is the attribute Key conforming to the
+	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+	// DaemonSet.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
 
-	// K8SReplicaSetNameKey is the attribute Key conforming to the
-	// "k8s.replicaset.name" semantic conventions. It represents the name of
-	// the ReplicaSet.
+	// K8SDaemonSetNameKey is the attribute Key conforming to the
+	// "k8s.daemonset.name" semantic conventions. It represents the name of the
+	// DaemonSet.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'opentelemetry'
-	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
 
-	// K8SReplicaSetUIDKey is the attribute Key conforming to the
-	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-	// ReplicaSet.
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
	return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+	// semantic conventions. It represents the UID of the Job.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
 
-	// K8SStatefulSetNameKey is the attribute Key conforming to the
-	// "k8s.statefulset.name" semantic conventions. It represents the name of
-	// the StatefulSet.
+	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+	// semantic conventions. It represents the name of the Job.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'opentelemetry'
-	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
-	// K8SStatefulSetUIDKey is the attribute Key conforming to the
-	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-	// StatefulSet.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
-	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+	K8SJobNameKey = attribute.Key("k8s.job.name")
 )
 
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
-	return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
-	return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from Pod specification, must be unique within a Pod. Container
-// runtime usually uses different globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
-	return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
-	return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
-	return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
-	return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
-	return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
-	return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
-	return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
-	return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
-	return K8SJobNameKey.String(val)
-}
-
 // K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
 // semantic conventions. It represents the UID of the Job.
 func K8SJobUID(val string) attribute.KeyValue {
 	return K8SJobUIDKey.String(val)
 }
 
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
-	return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
-	return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
-	return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
-	return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
-	return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
-	return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
-	return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
-	return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
-	return K8SStatefulSetUIDKey.String(val)
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
 }
 
-// An OCI image manifest.
+// A Kubernetes CronJob object.
 const (
-	// OciManifestDigestKey is the attribute Key conforming to the
-	// "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically is the digest
-	// by which the container image is known.
+	// K8SCronJobUIDKey is the attribute Key conforming to the
+	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+	// CronJob.
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples:
-	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
-	// Note: Follows [OCI Image Manifest
-	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
-	// and specifically the [Digest
-	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
-	// An example can be found in [Example Image
-	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
-	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
 )
 
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically is the digest by which
-// the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
-	return OciManifestDigestKey.String(val)
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
 }
 
 // The operating system (OS) on which the process represented by this resource
 // is running.
 const (
-	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
-	// semantic conventions. It represents the unique identifier for a
-	// particular build or compilation of the operating system.
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
 	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
-	OSBuildIDKey = attribute.Key("os.build_id")
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	OSTypeKey = attribute.Key("os.type")
 
 	// OSDescriptionKey is the attribute Key conforming to the "os.description"
 	// semantic conventions. It represents the human readable (not intended to
@@ -1243,7 +1378,7 @@ const (
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
 	// LTS'
 	OSDescriptionKey = attribute.Key("os.description")
@@ -1253,26 +1388,18 @@ const (
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: 'iOS', 'Android', 'Ubuntu'
 	OSNameKey = attribute.Key("os.name")
 
-	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
-	// conventions. It represents the operating system type.
-	//
-	// Type: Enum
-	// RequirementLevel: Optional
-	// Stability: experimental
-	OSTypeKey = attribute.Key("os.type")
-
 	// OSVersionKey is the attribute Key conforming to the "os.version"
 	// semantic conventions. It represents the version string of the operating
 	// system as defined in [Version
-	// Attributes](/docs/resource/README.md#version-attributes).
+	// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
 	//
 	// Type: string
 	// RequirementLevel: Optional
-	// Stability: experimental
+	// Stability: stable
 	// Examples: '14.2.1', '18.04.1'
 	OSVersionKey = attribute.Key("os.version")
 )
@@ -1302,13 +1429,6 @@ var (
 	OSTypeZOS = OSTypeKey.String("z_os")
 )
 
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
-	return OSBuildIDKey.String(val)
-}
-
 // OSDescription returns an attribute KeyValue conforming to the
 // "os.description" semantic conventions. It represents the human readable (not
 // intended to be parsed) OS version information, like e.g. reported by `ver`
@@ -1326,51 +1446,31 @@ func OSName(val string) attribute.KeyValue {
 // OSVersion returns an attribute KeyValue conforming to the "os.version"
 // semantic conventions. It represents the version string of the operating
 // system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
 func OSVersion(val string) attribute.KeyValue {
 	return OSVersionKey.String(val)
 }
 
 // An operating system process.
 const (
-	// ProcessCommandKey is the attribute Key conforming to the
-	// "process.command" semantic conventions. It represents the command used
-	// to launch the process (i.e. the command name). On Linux based systems,
-	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
-	// be set to the first parameter extracted from `GetCommandLineW`.
-	//
-	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol'
-	ProcessCommandKey = attribute.Key("process.command")
-
-	// ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents all the
-	// command arguments (including the command/executable itself) as received
-	// by the process. On Linux-based systems (and some other Unixoid systems
-	// supporting procfs), can be set according to the list of null-delimited
-	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-	// this would be the full argv vector passed to `main`.
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
 	//
-	// Type: string[]
+	// Type: int
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'cmd/otelcol', '--config=config.yaml'
-	ProcessCommandArgsKey = attribute.Key("process.command_args")
+	// Stability: stable
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
 
-	// ProcessCommandLineKey is the attribute Key conforming to the
-	// "process.command_line" semantic conventions. It represents the full
-	// command used to launch the process as a single string representing the
-	// full command. On Windows, can be set to the result of `GetCommandLineW`.
-	// Do not set this if you have to assemble it just for monitoring; use
-	// `process.command_args` instead.
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PID).
 	//
-	// Type: string
+	// Type: int
 	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
-	ProcessCommandLineKey = attribute.Key("process.command_line")
+	// Stability: stable
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
 
 	// ProcessExecutableNameKey is the attribute Key conforming to the
 	// "process.executable.name" semantic conventions. It represents the name
@@ -1379,8 +1479,9 @@ const (
 	// of `GetProcessImageFileNameW`.
 	//
 	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
 	// Examples: 'otelcol'
 	ProcessExecutableNameKey = attribute.Key("process.executable.name")
@@ -1391,102 +1492,76 @@ const (
 	// `GetProcessImageFileNameW`.
 	//
 	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
 	// Examples: '/usr/bin/cmd/otelcol'
 	ProcessExecutablePathKey = attribute.Key("process.executable.path")
 
-	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
-	// semantic conventions. It represents the username of the user that owns
-	// the process.
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
 	//
 	// Type: string
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 'root'
-	ProcessOwnerKey = attribute.Key("process.owner")
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
 
-	// ProcessParentPIDKey is the attribute Key conforming to the
-	// "process.parent_pid" semantic conventions. It represents the parent
-	// Process identifier (PPID).
-	//
-	// Type: int
-	// RequirementLevel: Optional
-	// Stability: experimental
-	// Examples: 111
-	ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
-	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
-	// semantic conventions. It represents the process identifier (PID).
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. // // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents the all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes + // below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") ) -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. 
-func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) } -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) } // ProcessExecutableName returns an attribute KeyValue conforming to the @@ -1507,675 +1582,101 @@ func ProcessExecutablePath(val string) attribute.KeyValue { return ProcessExecutablePathKey.String(val) } -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). -func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. 
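The relocated identifier helpers (ProcessPID, ProcessParentPID) plus the executable-path helper need only the standard library to describe the current process; a minimal sketch, with an illustrative function name:

import (
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// processIdentity collects the process identification attributes for the
// running process using the helpers defined in this file.
func processIdentity() []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.ProcessPID(os.Getpid()),        // process.pid
		semconv.ProcessParentPID(os.Getppid()), // process.parent_pid
	}
	if exe, err := os.Executable(); err == nil {
		attrs = append(attrs, semconv.ProcessExecutablePath(exe)) // process.executable.path
	}
	return attrs
}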
-func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. 
- BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) } -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`. Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) } -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents the all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. 
+func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) } -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) } -// Resources used by AWS Elastic Container Service (ECS). +// The single (language) runtime instance which is monitored. const ( - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. 
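The command and owner helpers above pair naturally with os.Args and os/user; a sketch (the user lookup can fail in minimal containers, hence the guard; the function name is illustrative):

import (
	"os"
	"os/user"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// processCommandAttrs records how the process was launched, preferring
// process.command_args over an assembled command line as the comments
// above recommend.
func processCommandAttrs() []attribute.KeyValue {
	attrs := []attribute.KeyValue{
		semconv.ProcessCommand(os.Args[0]),     // process.command
		semconv.ProcessCommandArgs(os.Args...), // process.command_args
	}
	if u, err := user.Current(); err == nil {
		attrs = append(attrs, semconv.ProcessOwner(u.Username)) // process.owner
	}
	return attrs
}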
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). -func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. 
It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Resource used by Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Resources used by Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). 
-func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// Heroku dyno metadata -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. 
- // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// A serverless instance. -const ( - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. // - // Type: int + // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. // // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. 
The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") ) -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) } -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes. 
-func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) } -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) } // A service instance. @@ -2185,7 +1686,7 @@ const ( // // Type: string // RequirementLevel: Required - // Stability: experimental + // Stability: stable // Examples: 'shoppingcart' // Note: MUST be the same for all instances of horizontally scaled // services. If the value was not specified, SDKs MUST fallback to @@ -2195,44 +1696,32 @@ const ( // the value MUST be set to `unknown_service`. ServiceNameKey = attribute.Key("service.name") - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") -// A service instance. -const ( // ServiceInstanceIDKey is the attribute Key conforming to the // "service.instance.id" semantic conventions. It represents the string ID // of the service instance. 
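For a Go binary, the process.runtime.* helpers above can be filled from the standard runtime package; a sketch, where the description string is a free-form choice rather than anything mandated by the conventions:

import (
	"fmt"
	"runtime"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// goRuntimeAttrs describes the Go runtime executing this process.
func goRuntimeAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.ProcessRuntimeName("go"),                 // process.runtime.name
		semconv.ProcessRuntimeVersion(runtime.Version()), // e.g. "go1.22.1"
		semconv.ProcessRuntimeDescription(fmt.Sprintf(
			"go version %s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)),
	}
}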
// // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' // Note: MUST be unique for each instance of the same // `service.namespace,service.name` pair (in other words // `service.namespace,service.name,service.instance.id` triplet MUST be @@ -2248,30 +1737,22 @@ const ( // Version 5, see RFC 4122 for more recommendations). ServiceInstanceIDKey = attribute.Key("service.instance.id") - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. It represents a namespace for - // `service.name`. + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. // // Type: string // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") ) -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) } // ServiceNamespace returns an attribute KeyValue conforming to the @@ -2281,48 +1762,61 @@ func ServiceNamespace(val string) attribute.KeyValue { return ServiceNamespaceKey.String(val) } +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance. +func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + // The telemetry SDK used to capture data recorded by the instrumentation // libraries. const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // TelemetrySDKNameKey is the attribute Key conforming to the // "telemetry.sdk.name" semantic conventions. 
It represents the name of the // telemetry SDK as defined above. // // Type: string - // RequirementLevel: Required - // Stability: experimental + // RequirementLevel: Optional + // Stability: stable // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // TelemetrySDKLanguageKey is the attribute Key conforming to the + // "telemetry.sdk.language" semantic conventions. It represents the + // language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // TelemetrySDKVersionKey is the attribute Key conforming to the // "telemetry.sdk.version" semantic conventions. It represents the version // string of the telemetry SDK. // // Type: string - // RequirementLevel: Required - // Stability: experimental + // RequirementLevel: Optional + // Stability: stable // Examples: '1.2.3' TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") ) var ( @@ -2344,12 +1838,10 @@ var ( TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") // ruby TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") // webjs TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") ) // TelemetrySDKName returns an attribute KeyValue conforming to the @@ -2366,70 +1858,22 @@ func TelemetrySDKVersion(val string) attribute.KeyValue { return TelemetrySDKVersionKey.String(val) } -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. 
It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. -func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) } // Resource describing the packaged software running the application code. Web // engines are typically executed using process.runtime. const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" // semantic conventions. It represents the name of the web engine. // // Type: string // RequirementLevel: Required - // Stability: experimental + // Stability: stable // Examples: 'WildFly' WebEngineNameKey = attribute.Key("webengine.name") @@ -2439,18 +1883,22 @@ const ( // // Type: string // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: '21.0.0' WebEngineVersionKey = attribute.Key("webengine.version") -) -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) // WebEngineName returns an attribute KeyValue conforming to the // "webengine.name" semantic conventions. 
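The telemetry.sdk.* and telemetry.auto.* attributes are normally set by the SDK itself; constructing them by hand is mostly useful in exporter tests. A sketch, assuming TelemetrySDKLanguageGo, the Go entry of the language enum defined in the full var block outside the visible hunk:

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// manualTelemetryAttrs mimics what the SDK reports about itself.
func manualTelemetryAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.TelemetrySDKName("opentelemetry"),
		semconv.TelemetrySDKLanguageGo,        // telemetry.sdk.language enum value
		semconv.TelemetrySDKVersion("1.2.3"),
		semconv.TelemetryAutoVersion("1.2.3"), // only when auto-instrumentation is used
	}
}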
It represents the name of the web @@ -2466,80 +1914,86 @@ func WebEngineVersion(val string) attribute.KeyValue { return WebEngineVersionKey.String(val) } +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + // Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's // concepts. const ( - // OTelScopeNameKey is the attribute Key conforming to the + // OtelScopeNameKey is the attribute Key conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). // // Type: string // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") + OtelScopeNameKey = attribute.Key("otel.scope.name") - // OTelScopeVersionKey is the attribute Key conforming to the + // OtelScopeVersionKey is the attribute Key conforming to the // "otel.scope.version" semantic conventions. It represents the version of // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). // // Type: string // RequirementLevel: Optional - // Stability: experimental + // Stability: stable // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") + OtelScopeVersionKey = attribute.Key("otel.scope.version") ) -// OTelScopeName returns an attribute KeyValue conforming to the +// OtelScopeName returns an attribute KeyValue conforming to the // "otel.scope.name" semantic conventions. It represents the name of the // instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) +func OtelScopeName(val string) attribute.KeyValue { + return OtelScopeNameKey.String(val) } -// OTelScopeVersion returns an attribute KeyValue conforming to the +// OtelScopeVersion returns an attribute KeyValue conforming to the // "otel.scope.version" semantic conventions. It represents the version of the // instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) +func OtelScopeVersion(val string) attribute.KeyValue { + return OtelScopeVersionKey.String(val) } // Span attributes used by non-OTLP exporters to represent OpenTelemetry // Scope's concepts. const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. + // OtelLibraryNameKey is the attribute Key conforming to the + // "otel.library.name" semantic conventions. It represents the deprecated, + // use the `otel.scope.name` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: 'io.opentelemetry.contrib.mongodb' - // Deprecated: use the `otel.scope.name` attribute. - OTelLibraryNameKey = attribute.Key("otel.library.name") + OtelLibraryNameKey = attribute.Key("otel.library.name") - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. + // OtelLibraryVersionKey is the attribute Key conforming to the + // "otel.library.version" semantic conventions. 
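A non-OTLP exporter can translate the SDK's instrumentation scope into these renamed attributes; a sketch, assuming the instrumentation package from the vendored SDK:

import (
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/instrumentation"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// scopeAttrs maps an instrumentation scope onto the otel.scope.* attributes.
func scopeAttrs(s instrumentation.Scope) []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.OtelScopeName(s.Name),       // otel.scope.name
		semconv.OtelScopeVersion(s.Version), // otel.scope.version
	}
}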
It represents the + // deprecated, use the `otel.scope.version` attribute. // // Type: string // RequirementLevel: Optional // Stability: deprecated // Examples: '1.0.0' - // Deprecated: use the `otel.scope.version` attribute. - OTelLibraryVersionKey = attribute.Key("otel.library.version") + OtelLibraryVersionKey = attribute.Key("otel.library.version") ) -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. -// -// Deprecated: use the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) +// OtelLibraryName returns an attribute KeyValue conforming to the +// "otel.library.name" semantic conventions. It represents the deprecated, use +// the `otel.scope.name` attribute. +func OtelLibraryName(val string) attribute.KeyValue { + return OtelLibraryNameKey.String(val) } -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. -// -// Deprecated: use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) +// OtelLibraryVersion returns an attribute KeyValue conforming to the +// "otel.library.version" semantic conventions. It represents the deprecated, +// use the `otel.scope.version` attribute. +func OtelLibraryVersion(val string) attribute.KeyValue { + return OtelLibraryVersionKey.String(val) } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 00000000..634a1dce --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 00000000..21497bb6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,3364 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + + // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + + // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +} + +// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} + +// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// Span attributes used by AWS Lambda (in addition to general `faas` +// attributes). +const ( + // AWSLambdaInvokedARNKey is the attribute Key conforming to the + // "aws.lambda.invoked_arn" semantic conventions. 
It represents the full + // invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the + // `/runtime/invocation/next` applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the +// "aws.lambda.invoked_arn" semantic conventions. It represents the full +// invoked ARN as provided on the `Context` passed to the function +// (`Lambda-Runtime-Invoked-Function-ARN` header on the +// `/runtime/invocation/next` applicable). +func AWSLambdaInvokedARN(val string) attribute.KeyValue { + return AWSLambdaInvokedARNKey.String(val) +} + +// Attributes for CloudEvents. CloudEvents is a specification on how to define +// event data in a standard way. These attributes can be attached to spans when +// performing operations with CloudEvents, regardless of the protocol being +// used. +const ( + // CloudeventsEventIDKey is the attribute Key conforming to the + // "cloudevents.event_id" semantic conventions. It represents the + // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) + // uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + + // CloudeventsEventSourceKey is the attribute Key conforming to the + // "cloudevents.event_source" semantic conventions. It represents the + // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) + // identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', + // '/cloudevents/spec/pull/123', 'my-service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + + // CloudeventsEventSpecVersionKey is the attribute Key conforming to the + // "cloudevents.event_spec_version" semantic conventions. It represents the + // [version of the CloudEvents + // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) + // which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + + // CloudeventsEventTypeKey is the attribute Key conforming to the + // "cloudevents.event_type" semantic conventions. It represents the + // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) + // contains a value describing the type of event related to the originating + // occurrence. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', + // 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + + // CloudeventsEventSubjectKey is the attribute Key conforming to the + // "cloudevents.event_subject" semantic conventions. It represents the + // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) + // of the event in the context of the event producer (identified by + // source). 
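The `ExceptionType`/`ExceptionMessage` helpers defined above are typically attached to a span event rather than to the span itself. A small sketch under that assumption; the tracer name and error type string are illustrative:

```go
package example

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "do-work")
	defer span.End()

	if err := mightFail(); err != nil {
		// Attach the exception details as a span event.
		span.AddEvent("exception", trace.WithAttributes(
			semconv.ExceptionType("*errors.errorString"), // illustrative
			semconv.ExceptionMessage(err.Error()),
		))
	}
}

func mightFail() error { return errors.New("division by zero") }
```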
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// CloudeventsEventID returns an attribute KeyValue conforming to the +// "cloudevents.event_id" semantic conventions. It represents the +// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) +// uniquely identifies the event. +func CloudeventsEventID(val string) attribute.KeyValue { + return CloudeventsEventIDKey.String(val) +} + +// CloudeventsEventSource returns an attribute KeyValue conforming to the +// "cloudevents.event_source" semantic conventions. It represents the +// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) +// identifies the context in which an event happened. +func CloudeventsEventSource(val string) attribute.KeyValue { + return CloudeventsEventSourceKey.String(val) +} + +// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to +// the "cloudevents.event_spec_version" semantic conventions. It represents the +// [version of the CloudEvents +// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) +// which the event uses. +func CloudeventsEventSpecVersion(val string) attribute.KeyValue { + return CloudeventsEventSpecVersionKey.String(val) +} + +// CloudeventsEventType returns an attribute KeyValue conforming to the +// "cloudevents.event_type" semantic conventions. It represents the +// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) +// contains a value describing the type of event related to the originating +// occurrence. +func CloudeventsEventType(val string) attribute.KeyValue { + return CloudeventsEventTypeKey.String(val) +} + +// CloudeventsEventSubject returns an attribute KeyValue conforming to the +// "cloudevents.event_subject" semantic conventions. It represents the +// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) +// of the event in the context of the event producer (identified by source). +func CloudeventsEventSubject(val string) attribute.KeyValue { + return CloudeventsEventSubjectKey.String(val) +} + +// Semantic conventions for the OpenTracing Shim +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The attributes used to perform database client calls. +const ( + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents an identifier for the database management + // system (DBMS) product being used. See below for a list of well-known + // identifiers. 
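For orientation, the CloudEvents constructors above would be attached to a consumer span roughly like this; the IDs and event types are the documented example values, not real ones:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// processEvent is a hypothetical CloudEvents consumer.
func processEvent(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "process-event")
	defer span.End()

	span.SetAttributes(
		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
		semconv.CloudeventsEventSource("/cloudevents/spec/pull/123"),
		semconv.CloudeventsEventSpecVersion("1.0"),
		semconv.CloudeventsEventType("com.github.pull_request.opened"),
	)
}
```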
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + + // DBConnectionStringKey is the attribute Key conforming to the + // "db.connection_string" semantic conventions. It represents the + // connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + + // DBUserKey is the attribute Key conforming to the "db.user" semantic + // conventions. It represents the username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + + // DBJDBCDriverClassnameKey is the attribute Key conforming to the + // "db.jdbc.driver_classname" semantic conventions. It represents the + // fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) + // driver used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + + // DBNameKey is the attribute Key conforming to the "db.name" semantic + // conventions. It represents the this attribute is used to report the name + // of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". In case there are multiple layers that could be + // considered for database name (e.g. Oracle instance name and schema + // name), the database name to be used is the more specific layer (e.g. + // Oracle schema name). + DBNameKey = attribute.Key("db.name") + + // DBStatementKey is the attribute Key conforming to the "db.statement" + // semantic conventions. It represents the database statement being + // executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not + // explicitly disabled via instrumentation configuration.) + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + + // DBOperationKey is the attribute Key conforming to the "db.operation" + // semantic conventions. It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. 
If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// DBConnectionString returns 
an attribute KeyValue conforming to the +// "db.connection_string" semantic conventions. It represents the connection +// string used to connect to the database. It is recommended to remove embedded +// credentials. +func DBConnectionString(val string) attribute.KeyValue { + return DBConnectionStringKey.String(val) +} + +// DBUser returns an attribute KeyValue conforming to the "db.user" semantic +// conventions. It represents the username for accessing the database. +func DBUser(val string) attribute.KeyValue { + return DBUserKey.String(val) +} + +// DBJDBCDriverClassname returns an attribute KeyValue conforming to the +// "db.jdbc.driver_classname" semantic conventions. It represents the +// fully-qualified class name of the [Java Database Connectivity +// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver +// used to connect. +func DBJDBCDriverClassname(val string) attribute.KeyValue { + return DBJDBCDriverClassnameKey.String(val) +} + +// DBName returns an attribute KeyValue conforming to the "db.name" semantic +// conventions. It represents the this attribute is used to report the name of +// the database being accessed. For commands that switch the database, this +// should be set to the target database (even if the command fails). +func DBName(val string) attribute.KeyValue { + return DBNameKey.String(val) +} + +// DBStatement returns an attribute KeyValue conforming to the +// "db.statement" semantic conventions. It represents the database statement +// being executed. +func DBStatement(val string) attribute.KeyValue { + return DBStatementKey.String(val) +} + +// DBOperation returns an attribute KeyValue conforming to the +// "db.operation" semantic conventions. It represents the name of the operation +// being executed, e.g. the [MongoDB command +// name](https://docs.mongodb.com/manual/reference/command/#database-operations) +// such as `findAndModify`, or the SQL keyword. +func DBOperation(val string) attribute.KeyValue { + return DBOperationKey.String(val) +} + +// Connection-level attributes for Microsoft SQL Server +const ( + // DBMSSQLInstanceNameKey is the attribute Key conforming to the + // "db.mssql.instance_name" semantic conventions. It represents the + // Microsoft SQL Server [instance + // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named + // instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no + // longer required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// DBMSSQLInstanceName returns an attribute KeyValue conforming to the +// "db.mssql.instance_name" semantic conventions. It represents the Microsoft +// SQL Server [instance +// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) +// connecting to. This name is used to determine the port of a named instance. +func DBMSSQLInstanceName(val string) attribute.KeyValue { + return DBMSSQLInstanceNameKey.String(val) +} + +// Call-level attributes for Cassandra +const ( + // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e. how many rows will be returned at once. 
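Taken together, the `db.*` constructors above give a database client span its standard shape. A hedged sketch; the span name, database, and statement are placeholders:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// queryUsers shows the usual shape of a database client span.
func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "SELECT customers",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL,
			semconv.DBName("customers"),
			semconv.DBStatement("SELECT * FROM wuser_table"),
			semconv.DBOperation("SELECT"),
		))
	defer span.End()
}
```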
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + + // DBCassandraTableKey is the attribute Key conforming to the + // "db.cassandra.table" semantic conventions. It represents the name of the + // primary table that the operation is acting upon, including the keyspace + // name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra + // rather than sql. It is not recommended to attempt any client-side + // parsing of `db.statement` just to get this property, but it should be + // set if it is provided by the library being instrumented. If the + // operation is acting upon an anonymous table, or more than one table, + // this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + + // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents the + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + + // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + + // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + + // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. 
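Using the Cassandra keys above together with the enum values and helper functions that follow below, a read might be annotated as in this illustrative sketch; the keyspace and table are placeholders:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// cassandraRead sketches the Cassandra-specific call-level attributes.
func cassandraRead(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "SELECT mykeyspace.mytable")
	defer span.End()

	span.SetAttributes(
		semconv.DBSystemCassandra,
		semconv.DBCassandraTable("mykeyspace.mytable"),
		semconv.DBCassandraPageSize(5000),
		semconv.DBCassandraConsistencyLevelLocalQuorum,
		semconv.DBCassandraIdempotence(true),
	)
}
```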
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} + +// DBCassandraTable returns an attribute KeyValue conforming to the +// "db.cassandra.table" semantic conventions. It represents the name of the +// primary table that the operation is acting upon, including the keyspace name +// (if applicable). +func DBCassandraTable(val string) attribute.KeyValue { + return DBCassandraTableKey.String(val) +} + +// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents the whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} + +// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +} + +// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. 
It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's +// concepts. +const ( + // OtelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + + // OtelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. 
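The per-database helpers above (`DBRedisDBIndex`, `DBMongoDBCollection`, `DBSQLTable`) slot in next to the generic `db.*` attributes. Two small sketches with placeholder values:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func redisGet(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "GET")
	defer span.End()
	span.SetAttributes(
		semconv.DBSystemRedis,
		semconv.DBRedisDBIndex(15), // only set when not the default DB 0
		semconv.DBStatement("GET mykey"),
	)
}

func mongoFind(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "findAndModify products")
	defer span.End()
	span.SetAttributes(
		semconv.DBSystemMongoDB,
		semconv.DBMongoDBCollection("products"),
		semconv.DBOperation("findAndModify"),
	)
}
```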
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// OtelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OtelStatusDescription(val string) attribute.KeyValue { + return OtelStatusDescriptionKey.String(val) +} + +// This semantic convention describes an instance of a function that runs +// without provisioning or managing of servers (also known as serverless +// functions or Function as a Service (FaaS)) with spans. +const ( + // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + // + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + + // FaaSExecutionKey is the attribute Key conforming to the "faas.execution" + // semantic conventions. It represents the execution ID of the current + // function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSExecution returns an attribute KeyValue conforming to the +// "faas.execution" semantic conventions. It represents the execution ID of the +// current function execution. +func FaaSExecution(val string) attribute.KeyValue { + return FaaSExecutionKey.String(val) +} + +// Semantic Convention for FaaS triggered as a response to some data source +// operation such as a database or filesystem read/write. +const ( + // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. 
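On the incoming side of a FaaS invocation, `faas.trigger` is required, as the note above says. A minimal sketch for an HTTP-triggered function; the execution ID is the documented example value:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// handler sketches the incoming span of an HTTP-triggered function.
func handler(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "my-function",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.FaaSTriggerHTTP, // required on the incoming span
			semconv.FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
		))
	defer span.End()
}
```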
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + + // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It represents the + // describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + + // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + + // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
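A datasource-triggered invocation combines the trigger enum with the `faas.document.*` helpers defined above. An illustrative sketch using the documented example values:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// onObjectCreated sketches a datasource trigger, e.g. a new object
// landing in a bucket; names are illustrative.
func onObjectCreated(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "on-object-created")
	defer span.End()

	span.SetAttributes(
		semconv.FaaSTriggerDatasource,
		semconv.FaaSDocumentCollection("myBucketName"),
		semconv.FaaSDocumentOperationInsert,
		semconv.FaaSDocumentName("myFile.txt"),
		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
	)
}
```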
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + + // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// Contains additional attributes for incoming FaaS spans. +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. 
In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetAppProtocolNameKey is the attribute Key conforming to the + // "net.app.protocol.name" semantic conventions. It represents the + // application layer protocol used. The value SHOULD be normalized to + // lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + + // NetAppProtocolVersionKey is the attribute Key conforming to the + // "net.app.protocol.version" semantic conventions. It represents the + // version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol + // used and might be different from the protocol client's version. If the + // HTTP client used has a version of `0.27.2`, but sends HTTP version + // `1.1`, this attribute should be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. 
It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. 
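The `net.*` keys above can be set directly via `attribute.Key` methods; the enum values and convenience constructors appear further down in this file. A sketch for a client span with placeholder endpoint values:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

// dial sketches connection-level attributes on a client span.
func dial(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "dial",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.NetTransportTCP, // enum value, defined below
		semconv.NetPeerNameKey.String("example.com"),
		semconv.NetPeerPortKey.Int(443),
		semconv.NetSockPeerAddrKey.String("93.184.216.34"),
	)
}
```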
+ // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetAppProtocolName returns an attribute KeyValue conforming to the +// "net.app.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetAppProtocolName(val string) attribute.KeyValue { + return NetAppProtocolNameKey.String(val) +} + +// NetAppProtocolVersion returns an attribute KeyValue conforming to the +// "net.app.protocol.version" semantic conventions. It represents the version +// of the application layer protocol used. See note below. 
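With the enum values above, a mobile client might describe its host connection as in this sketch; the carrier and protocol values are illustrative:

```go
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

// recordHostConnection sketches the host-connection and carrier attributes.
func recordHostConnection(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "sync")
	defer span.End()

	span.SetAttributes(
		semconv.NetHostConnectionTypeCell,
		semconv.NetHostConnectionSubtypeLte,
		semconv.NetHostCarrierNameKey.String("sprint"),
		semconv.NetAppProtocolName("http"),
	)
}
```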
+func NetAppProtocolVersion(val string) attribute.KeyValue { + return NetAppProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. 
+func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](../../resource/semantic_conventions/README.md#service) of +// the remote service. SHOULD be equal to the actual `service.name` resource +// attribute of the remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// These attributes may be used for any operation with an authenticated and/or +// authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + + // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + + // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system. 
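
// Editor's sketch (illustrative only, not part of the vendored file):
// `peer.service` is typically set on client spans to name the remote service,
// while the `enduser.*` attributes annotate server spans with the
// authenticated caller. The values are the examples quoted in the comments
// above; `clientSpan` and `serverSpan` are placeholder trace.Span values.
//
//	clientSpan.SetAttributes(semconv.PeerService("AuthTokenCache"))
//	serverSpan.SetAttributes(
//		semconv.EnduserID("username"),
//		semconv.EnduserRole("admin"),
//		semconv.EnduserScope("read:message, write:files"),
//	)
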
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions. It represents the kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be
+ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+ // assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions. It represents the value of the
+ // [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
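
// Editor's sketch (illustrative only, not part of the vendored file): a
// typical HTTP client span combines the required `http.method`, the
// conditionally required `http.status_code`, and one of the pre-built
// `http.flavor` enum members from the var block above; the values are taken
// from the examples in the comments.
//
//	span.SetAttributes(
//		semconv.HTTPMethod("GET"),
//		semconv.HTTPStatusCode(200),
//		semconv.HTTPFlavorHTTP11,
//		semconv.HTTPUserAgent("CERN-LineMode/2.15 libwww/2.17b3"),
//	)
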
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of the request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network
+ // issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See note below
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: 'http.route' MUST NOT be populated when this is not supported by
+ // the HTTP server framework as the route attribute should have
+ // low-cardinality and the URI path can NOT substitute it.
+ HTTPRouteKey = attribute.Key("http.route")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework).
See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// HTTPClientIP returns an attribute KeyValue conforming to the +// "http.client_ip" semantic conventions. It represents the IP address of the +// original client behind all proxies, if known (e.g. from +// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)). +func HTTPClientIP(val string) attribute.KeyValue { + return HTTPClientIPKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
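
// Editor's sketch (illustrative only, not part of the vendored file): the
// `string[]`-typed DynamoDB attributes take variadic string arguments and are
// recorded as a single slice-valued attribute via StringSlice, while scalar
// parameters map to Bool/Int/Float64 as declared above. Values are taken from
// the examples in the comments.
//
//	span.SetAttributes(
//		semconv.AWSDynamoDBTableNames("Users", "Cats"),
//		semconv.AWSDynamoDBConsistentRead(true),
//		semconv.AWSDynamoDBProvisionedReadCapacity(1.0),
//	)
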
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
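
// Editor's sketch (illustrative only, not part of the vendored file):
// attributes known up front can also be supplied when the span is started
// rather than via SetAttributes; `tracer` and the span name are placeholders.
//
//	ctx, span := tracer.Start(ctx, "DynamoDB.Query",
//		trace.WithAttributes(
//			semconv.AWSDynamoDBScanForward(true),
//			semconv.AWSDynamoDBLimit(10),
//		))
//	defer span.End()
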
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. 
It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationKindKey is the attribute Key conforming to the + // "messaging.destination.kind" semantic conventions. It represents the + // kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. 
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceKindKey is the attribute Key conforming to the + // "messaging.source.kind" semantic conventions. It represents the kind of + // message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. 
Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
+ MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) 
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka, which are used for grouping alike messages to
+ // ensure they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka, which are used for grouping alike messages to ensure
+// they're processed on the same partition. They differ from
+// `messaging.message.id` in that they're not unique. If the key is `null`,
+// the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // isolated from each other.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message,
+ // which determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of a message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of a message, another way to mark the message besides the
+ // message id.
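The RocketMQ keys in this section use the same constructor pattern. As an illustrative fragment (the group and key names are hypothetical, and the semconv alias is assumed to point at the vendored package):

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version; match the vendored package
	"go.opentelemetry.io/otel/trace"
)

// annotateFIFOConsumer attaches the RocketMQ attributes from this section to
// an already-started consumer span.
func annotateFIFOConsumer(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingRocketmqNamespace("myNamespace"),
		semconv.MessagingRocketmqClientGroup("myConsumerGroup"),
		semconv.MessagingRocketmqMessageGroup("myMessageGroup"),
		semconv.MessagingRocketmqMessageTypeFifo,
		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
	)
}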
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// isolated from each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages.
Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. 
It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) 
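Taken together, the RPC attributes describe a single call: the system enum, the logical service and method, and (for gRPC) the numeric status code. A hedged sketch of a client-side annotation, with made-up service and method names and an assumed semconv import version:

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0" // assumed version; match the vendored package
	"go.opentelemetry.io/otel/trace"
)

// annotateGRPCClient records a completed gRPC call on an existing span.
func annotateGRPCClient(span trace.Span) {
	span.SetAttributes(
		semconv.RPCSystemGRPC,
		semconv.RPCService("myservice.EchoService"),
		semconv.RPCMethod("Echo"),
		semconv.RPCGRPCStatusCodeOk,
	)
}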
+ // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md deleted file mode 100644 index 0b6cbe96..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.24.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go deleted file mode 100644 index 6e688345..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go +++ /dev/null @@ -1,4387 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes FaaS attributes. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. 
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. 
It identifies the class / type of
- // event.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'browser.mouse.click', 'device.app.lifecycle'
- // Note: Event names are subject to the same rules as [attribute
- // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md).
- // Notably, event names are namespaced to avoid collisions and provide a
- // clean separation of semantics for events in separate domains like
- // browser, mobile, and kubernetes.
- EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of
-// event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// The attributes described in this section are rather generic. They may be
-// used in any Log Record they apply to.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means that two
- // distinguishable log records MUST have different values.
- // The id MAY be a [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
-// Describes Log attributes
-const (
- // LogIostreamKey is the attribute Key conforming to the "log.iostream"
- // semantic conventions. It represents the stream associated with the log.
- // See below for a list of well-known values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
- // Logs from stdout stream
- LogIostreamStdout = LogIostreamKey.String("stdout")
- // Events from stderr stream
- LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// A file to which log was emitted.
-const (
- // LogFileNameKey is the attribute Key conforming to the "log.file.name"
- // semantic conventions. It represents the basename of the file.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'audit.log'
- LogFileNameKey = attribute.Key("log.file.name")
-
- // LogFileNameResolvedKey is the attribute Key conforming to the
- // "log.file.name_resolved" semantic conventions. It represents the
- // basename of the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'uuid.log'
- LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
- // LogFilePathKey is the attribute Key conforming to the "log.file.path"
- // semantic conventions. It represents the full path to the file.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. -func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes Database attributes -const ( - // PoolNameKey is the attribute Key conforming to the "pool.name" semantic - // conventions. It represents the name of the connection pool; unique - // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, then the - // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) - // should be used - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myDataSource' - PoolNameKey = attribute.Key("pool.name") - - // StateKey is the attribute Key conforming to the "state" semantic - // conventions. It represents the state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'idle' - StateKey = attribute.Key("state") -) - -var ( - // idle - StateIdle = StateKey.String("idle") - // used - StateUsed = StateKey.String("used") -) - -// PoolName returns an attribute KeyValue conforming to the "pool.name" -// semantic conventions. It represents the name of the connection pool; unique -// within the instrumented application. In case the connection pool -// implementation doesn't provide a name, then the -// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) -// should be used -func PoolName(val string) attribute.KeyValue { - return PoolNameKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. 
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if and only if the exception
- // was handled by this handler.)
- // Stability: experimental
- // Examples: 'Contoso.MyHandler'
- AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
- // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
- // the rate limiting policy name.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if the matched endpoint for the
- // request had a rate-limiting policy.)
- // Stability: experimental
- // Examples: 'fixed', 'sliding', 'token'
- AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
- // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.result" semantic conventions. It represents
- // the rate-limiting result: it shows whether the lease was acquired or
- // contains a rejection reason.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: experimental
- // Examples: 'acquired', 'request_canceled'
- AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
- // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
- // "aspnetcore.request.is_unhandled" semantic conventions. It represents
- // the flag indicating if the request was handled by the application
- // pipeline.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (if and only if the request was
- // not handled.)
- // Stability: experimental
- // Examples: True
- AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
- // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
- // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
- // value that indicates whether the matched route is a fallback route.
- //
- // Type: boolean
- // RequirementLevel: ConditionallyRequired (If and only if a route was
- // successfully matched.)
- // Stability: experimental
- // Examples: True
- AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-)
-
-var (
- // Lease was acquired
- AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
- // Lease request was rejected by the endpoint limiter
- AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
- // Lease request was rejected by the global limiter
- AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
- // Lease request was canceled
- AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
- return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
- return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating if the request was handled by the application pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
- return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
- return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
-
-// SignalR attributes
-const (
- // SignalrConnectionStatusKey is the attribute Key conforming to the
- // "signalr.connection.status" semantic conventions. It represents the
- // SignalR HTTP connection closure status.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'app_shutdown', 'timeout'
- SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
- // SignalrTransportKey is the attribute Key conforming to the
- // "signalr.transport" semantic conventions. It represents the [SignalR
- // transport
- // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'web_sockets', 'long_polling'
- SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
- // The connection was closed normally
- SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
- // The connection was closed due to a timeout
- SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
- // The connection was closed because the app is shutting down
- SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
- // ServerSentEvents protocol
- SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
- // LongPolling protocol
- SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
- // WebSockets protocol
- SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// Describes JVM buffer metric attributes.
-const (
- // JvmBufferPoolNameKey is the attribute Key conforming to the
- // "jvm.buffer.pool.name" semantic conventions. It represents the name of
- // the buffer pool.
- //
- // Type: string
- // RequirementLevel: Recommended
- // Stability: experimental
- // Examples: 'mapped', 'direct'
- // Note: Pool names are generally obtained via
- // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
- JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// Describes JVM memory metric attributes.
-const (
- // JvmMemoryPoolNameKey is the attribute Key conforming to the
- // "jvm.memory.pool.name" semantic conventions. It represents the name of
- // the memory pool.
- // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// Describes System metric attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU metric attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory metric attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. 
It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging metric attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem metric attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. 
It represents the
- // filesystem type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ext4'
- SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
- // used
- SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
- // free
- SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
- // reserved
- SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
- // fat32
- SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
- // exfat
- SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
- // ntfs
- SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
- // refs
- SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
- // hfsplus
- SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
- // ext4
- SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
- return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
- return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network metric attributes
-const (
- // SystemNetworkStateKey is the attribute Key conforming to the
- // "system.network.state" semantic conventions. It represents the state of
- // a network connection; a stateless protocol MUST NOT set this attribute
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'close_wait'
- SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
- // close
- SystemNetworkStateClose = SystemNetworkStateKey.String("close")
- // close_wait
- SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
- // closing
- SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
- // delete
- SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
- // established
- SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
- // fin_wait_1
- SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
- // fin_wait_2
- SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
- // last_ack
- SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
- // listen
- SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
- // syn_recv
- SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
- // syn_sent
- SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
- // time_wait
- SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process metric attributes
-const (
- // SystemProcessesStatusKey is the attribute Key conforming to the
- // "system.processes.status" semantic conventions.
It represents the - // process state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") -) - -var ( - // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") - // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") - // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") - // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") -) - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// The attributes used to describe telemetry in the context of databases. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). 
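The client.address/client.port helpers just above belong to the v1.24.0 package this patch removes, but they follow the same constructor pattern as their replacements. A sketch of how a server-side span would have used them, with placeholder values:

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0" // the package being removed by this patch
	"go.opentelemetry.io/otel/trace"
)

// annotateServerSpan records the client peer as observed behind any
// intermediaries, per the notes in the doc comments above.
func annotateServerSpan(span trace.Span) {
	span.SetAttributes(
		semconv.ClientAddress("client.example.com"),
		semconv.ClientPort(65123),
	)
}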
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
- // "db.cassandra.idempotence" semantic conventions. It represents whether
- // or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-
- // DBCassandraTableKey is the attribute Key conforming to the
- // "db.cassandra.table" semantic conventions. It represents the name of the
- // primary Cassandra table that the operation is acting upon, including the
- // keyspace name (if applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mytable'
- // Note: This mirrors the db.sql.table attribute but references cassandra
- // rather than sql. It is not recommended to attempt any client-side
- // parsing of `db.statement` just to get this property, but it should be
- // set if it is provided by the library being instrumented. If the
- // operation is acting upon an anonymous table, or more than one table,
- // this value MUST NOT be set.
- DBCassandraTableKey = attribute.Key("db.cassandra.table")
-
- // DBConnectionStringKey is the attribute Key conforming to the
- // "db.connection_string" semantic conventions. It represents the
- // connection string used to connect to the database. It is recommended to
- // remove embedded credentials.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. It represents the RU - // consumed for that operation. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // identifier of an Elasticsearch cluster.
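An illustrative sketch for the Cosmos DB attributes above, not part of the vendored source; the values mirror the documented examples:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateCosmosDBCall records the Cosmos DB attributes defined above on an
// existing span.
func annotateCosmosDBCall(span trace.Span) {
	span.SetAttributes(
		semconv.DBCosmosDBConnectionModeGateway, // enum member defined below
		semconv.DBCosmosDBContainer("orders"),
		semconv.DBCosmosDBRequestCharge(46.18), // RU consumed by the operation
		semconv.DBCosmosDBStatusCode(200),
		semconv.DBCosmosDBSubStatusCode(1000),
	)
}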
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") - - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") - - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // being connected to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails).
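The `select @@hostname` note above might be applied like this; a sketch only, with a hypothetical helper name and the v1.24.0 import path assumed:

package example

import (
	"context"
	"database/sql"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// lookupDBInstanceID asks MySQL for its hostname so that it can be recorded
// as db.instance.id on spans for this connection.
func lookupDBInstanceID(ctx context.Context, db *sql.DB) (attribute.KeyValue, error) {
	var hostname string
	if err := db.QueryRowContext(ctx, "select @@hostname").Scan(&hostname); err != nil {
		return attribute.KeyValue{}, err
	}
	return semconv.DBInstanceID(hostname), nil
}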
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") - - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") - - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBSystemKey = attribute.Key("db.system") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. 
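A sketch combining the core db.* attributes above on a client span, following the guidance to take db.operation and db.sql.table from the instrumented library rather than parsing db.statement; the function name is hypothetical:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateSQLQuery sets the core db.* attributes on a client span.
func annotateSQLQuery(span trace.Span) {
	span.SetAttributes(
		semconv.DBSystemPostgreSQL, // enum member defined below
		semconv.DBName("customers"),
		semconv.DBOperation("SELECT"),
		semconv.DBSQLTable("public.users"),
		semconv.DBStatement("SELECT * FROM public.users WHERE id = $1"),
	)
}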
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Some other SQL database. Fallback only. 
See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBCassandraCoordinatorDC returns an 
attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary Cassandra table that the operation is acting upon, including the -// keyspace name (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. -func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the RU -// consumed for that operation. -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions.
It -// represents the request payload size in bytes. -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// DBInstanceID returns an attribute KeyValue conforming to the -// "db.instance.id" semantic conventions. It represents an identifier (address, -// unique name, or any other identifier) of the database instance that is -// executing queries or mutations on the current connection. This is useful in -// cases where the database is running in a clustered environment and the -// instrumentation is able to record the node executing the query. The client -// may obtain this value in databases like MySQL using queries like `select -// @@hostname`. -func DBInstanceID(val string) attribute.KeyValue { - return DBInstanceIDKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. -func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the MongoDB -// collection being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// being connected to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the name of -// the database being accessed.
For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// Describes deprecated HTTP attributes. -const ( - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'GET', 'POST', 'HEAD' - // Deprecated: use `http.request.method` instead. - HTTPMethodKey = attribute.Key("http.method") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.request.header.content-length` instead. - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.response.header.content-length` instead. - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'http', 'https' - // Deprecated: use `url.scheme` instead. 
- HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 200 - // Deprecated: use `http.response.status_code` instead. - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.path` and `url.query` instead. - HTTPTargetKey = attribute.Key("http.target") - - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.full` instead. - HTTPURLKey = attribute.Key("http.url") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - // Deprecated: use `user_agent.original` instead. - HTTPUserAgentKey = attribute.Key("http.user_agent") -) - -var ( - // HTTP/1.0 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. -// -// Deprecated: use `http.request.method` instead. -func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. -// -// Deprecated: use `http.request.header.content-length` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. -// -// Deprecated: use `http.response.header.content-length` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. -// -// Deprecated: use `url.scheme` instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. 
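A migration sketch contrasting the deprecated http.* helpers in this block with the replacement attributes named in their Deprecated notes; both forms are defined in this file, and the function name is hypothetical:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// migrateHTTPAttributes contrasts the deprecated http.* attributes with
// their documented replacements.
func migrateHTTPAttributes(span trace.Span) {
	// Deprecated form:
	span.SetAttributes(
		semconv.HTTPMethod("GET"),
		semconv.HTTPStatusCode(200),
	)
	// Replacement form, per the Deprecated notes:
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,        // http.request.method
		semconv.HTTPResponseStatusCode(200), // http.response.status_code
	)
}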
-// -// Deprecated: use `http.response.status_code` instead. -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. -// -// Deprecated: use `url.path` and `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. -// -// Deprecated: use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. -// -// Deprecated: use `user_agent.original` instead. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address`. - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port`. - NetHostPortKey = attribute.Key("net.host.port") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address` on client spans and `client.address` on - // server spans. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port` on client spans and `client.port` on - // server spans. - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'amqp', 'http', 'mqtt' - // Deprecated: use `network.protocol.name`. - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '3.1.1' - // Deprecated: use `network.protocol.version`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: use `network.local.address`. 
- NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `network.local.port`. - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '192.168.0.1' - // Deprecated: use `network.peer.address`. - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: no replacement at this time. - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 65531 - // Deprecated: use `network.peer.port`. - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport`. - NetTransportKey = attribute.Key("net.transport") -) - -var ( - // IPv4 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // ip_tcp - // - // Deprecated: use `network.transport`. - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - // - // Deprecated: use `network.transport`. - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe - // - // Deprecated: use `network.transport`. - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - // - // Deprecated: use `network.transport`. - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - // - // Deprecated: use `network.transport`. - NetTransportOther = NetTransportKey.String("other") -) - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. -// -// Deprecated: use `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. -// -// Deprecated: use `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. -// -// Deprecated: use `server.address` on client spans and `client.address` on -// server spans. 
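A similar migration sketch for the deprecated net.* helpers; ServerAddress and ServerPort are assumed to be the server.address/server.port helpers defined elsewhere in this package:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// migrateNetAttributes shows the documented replacements for the deprecated
// net.* attributes on a client span.
func migrateNetAttributes(span trace.Span) {
	// Deprecated form:
	span.SetAttributes(
		semconv.NetPeerName("example.com"),
		semconv.NetPeerPort(8080),
	)
	// Replacement form on client spans, per the Deprecated notes:
	span.SetAttributes(
		semconv.ServerAddress("example.com"), // server.address (assumed helper)
		semconv.ServerPort(8080),             // server.port (assumed helper)
	)
}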
-func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. -// -// Deprecated: use `server.port` on client spans and `client.port` on server -// spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. -// -// Deprecated: use `network.protocol.name`. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. -// -// Deprecated: use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. -// -// Deprecated: use `network.local.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. -// -// Deprecated: use `network.local.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. -// -// Deprecated: use `network.peer.address`. -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. -// -// Deprecated: no replacement at this time. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. -// -// Deprecated: use `network.peer.port`. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. 
- DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number. -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// These attributes may be used for any disk related operation. -const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It describes a class of error the - // operation ended with. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable and SHOULD have low - // cardinality. - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. - // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It SHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span.
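A sketch of the error.type guidance above: prefer a predictable, low-cardinality class and fall back to the _OTHER member when no custom value is defined; the function name is hypothetical:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// setErrorType applies the error.type guidance: a low-cardinality class
// when one is defined, otherwise the documented _OTHER fallback.
func setErrorType(span trace.Span, errClass string) {
	if errClass == "" {
		span.SetAttributes(semconv.ErrorTypeOther)
		return
	}
	span.SetAttributes(semconv.ErrorTypeKey.String(errClass)) // e.g. "timeout"
}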
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It SHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message.
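A sketch of the exception.escaped note above: recording an active error just before ending the span and marking it as escaped; the function name is hypothetical:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// endSpanWithEscapingError records an exception immediately before ending
// the span; because the error is still logically "in flight" at that point,
// it is marked as escaped.
func endSpanWithEscapingError(span trace.Span, err error) {
	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
	span.End()
}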
-func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Semantic convention attributes in the HTTP namespace. -const ( - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the HTTP - // request method. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. - // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known methods, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions.
It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the - // ordinal number of the request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. - // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
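A sketch of the known-methods rule above: known methods map to their enum members, and anything else MUST be reported as `_OTHER`, with the fallback also recording the original value; only GET and POST are shown for brevity, and the function name is hypothetical:

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// requestMethodAttrs maps a wire-level method onto the http.request.method
// convention.
func requestMethodAttrs(method string) []attribute.KeyValue {
	switch method {
	case "GET":
		return []attribute.KeyValue{semconv.HTTPRequestMethodGet}
	case "POST":
		return []attribute.KeyValue{semconv.HTTPRequestMethodPost}
	default:
		return []attribute.KeyValue{
			semconv.HTTPRequestMethodOther,
			semconv.HTTPRequestMethodOriginal(method),
		}
	}
}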
- HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. -func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. 
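A combined sketch of the HTTP helpers above on a server span; the route value follows the path-template rule in the http.route note, and the function name is hypothetical:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateServerSpan combines the HTTP helpers above on a server span;
// http.route uses the framework's path template, never the raw URI path.
func annotateServerSpan(span trace.Span) {
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,
		semconv.HTTPRoute("/users/:userID"),
		semconv.HTTPResponseStatusCode(200),
		semconv.HTTPRequestBodySize(3495),
	)
}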
-const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client_id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client_id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have an auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such a notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed.
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have an auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. It - // represents the name of the original destination the message was - // published to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such a notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka that are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, its string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to either the compressed or the uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to either the compressed or the uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the RabbitMQ message routing key.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for a delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to the consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the message group, which is essential for FIFO messages. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of the message, another way to mark the message besides the message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of the message besides the topic.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources; resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents an identifier for - // the messaging system being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationPublish = MessagingOperationKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationCreate = MessagingOperationKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationReceive = MessagingOperationKey.String("receive") - // One or more messages are passed to a consumer.
This operation refers to push-based scenarios, where consumers register callbacks that get called by messaging SDKs - MessagingOperationDeliver = MessagingOperationKey.String("deliver") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") - // Azure Event Hubs - MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") - // Azure Service Bus - MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client_id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have an auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name. -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions.
It represents the -// low cardinality representation of the messaging destination name. -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have an auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to. -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka that are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the RabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for a delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to the consumer.
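A sketch of how the messaging attributes and helper constructors above might annotate a Kafka publish span. It assumes the go.opentelemetry.io/otel and go.opentelemetry.io/otel/trace APIs and a v1.24.0 import path for this package; the values are illustrative only.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func publishOrder(ctx context.Context) {
	// A producer span for a single Kafka publish operation.
	_, span := otel.Tracer("example").Start(ctx, "publish orders",
		trace.WithSpanKind(trace.SpanKindProducer))
	defer span.End()

	span.SetAttributes(
		semconv.MessagingSystemKafka,                 // well-known system identifier
		semconv.MessagingOperationPublish,            // operation enum value
		semconv.MessagingDestinationName("orders"),   // topic name
		semconv.MessagingKafkaMessageKey("order-42"), // partitioning key, not unique
		semconv.MessagingKafkaDestinationPartition(2),
	)
}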
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the message group, which is essential for FIFO messages. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of the message, another way to mark the message besides the message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of the message besides the topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources; resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// These attributes may be used for any network-related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It - // describes more details regarding the connection type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection.
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // version of the protocol specified in `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. 
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
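As a sketch, the network attributes above compose naturally into a reusable attribute set describing one side of a TCP connection (import path for this vendored package assumed to be v1.24.0; the literals are illustrative only):

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// connAttrs describes the peer side of an HTTP/1.1-over-TCP connection using
// the network attributes defined above.
func connAttrs() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.NetworkTransportTCP,             // transport enum, lowercase by convention
		semconv.NetworkTypeIpv4,                 // OSI network layer
		semconv.NetworkPeerAddress("10.1.2.80"), // IP address or Unix socket name
		semconv.NetworkPeerPort(443),
		semconv.NetworkProtocolName("http"),   // normalized to lowercase
		semconv.NetworkProtocolVersion("1.1"), // may differ from the client's own version
	}
}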
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the version -// of the protocol specified in `network.protocol.name`. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. 
Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. 
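A sketch combining the RPC attributes above with the server attributes defined next, on a gRPC client span; note the span name carries the $method part that rpc.method must match. API usage is assumed to follow the standard otel trace package, and all values are illustrative.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

func callEcho(ctx context.Context) {
	// A client span for a unary gRPC call.
	_, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/exampleMethod",
		trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.RPCSystemGRPC,                       // well-known remoting system
		semconv.RPCService("myservice.EchoService"), // logical service name
		semconv.RPCMethod("exampleMethod"),          // logical method name
		semconv.RPCGRPCStatusCodeOk,                 // numeric gRPC status enum
		semconv.ServerAddress("example.com"),        // server behind any intermediaries
		semconv.ServerPort(8080),
	)
}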
- ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. 
It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of the DER-encoded version of - // the certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of the DER-encoded version of - // the certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of the DER-encoded version - // of the certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions.
It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/time - // indicating when the client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the - // date/time indicating when the client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the server - // name indication (SNI), which tells the server the hostname to which the - // client is attempting to connect. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions.
It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // PEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server.
For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the server. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/time - // indicating when the server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/time indicating when the server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server.
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions.
It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/time -// indicating when the client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/time -// indicating when the client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the server name -// indication (SNI), which tells the server the hostname to which the client is -// attempting to connect. -func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable. -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES). -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions.
It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the PEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the server. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -}
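For orientation, a minimal sketch of how these generated TLS helpers are typically consumed; the tracer name, context plumbing, and handshake values below are illustrative assumptions, not part of the vendored code:

    package semconvexample

    import (
        "context"

        "go.opentelemetry.io/otel"
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
    )

    // recordHandshake starts a span and attaches TLS attributes via the
    // generated constructors; each helper returns an attribute.KeyValue.
    func recordHandshake(ctx context.Context) {
        _, span := otel.Tracer("example").Start(ctx, "tls.handshake")
        defer span.End()
        span.SetAttributes(
            semconv.TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"), // assumed cipher
            semconv.TLSEstablished(true),
            semconv.TLSResumed(false),
            semconv.TLSClientServerName("opentelemetry.io"),
        )
    }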
- -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/time -// indicating when the server certificate is no longer considered valid. -func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/time -// indicating when the server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. - URLFullKey = attribute.Key("url.full") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - URLPathKey = attribute.Key("url.path") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") -) - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions.
It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go deleted file mode 100644 index c1718234..00000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go +++ /dev/null @@ -1,1323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes allow reporting this unit of code and therefore provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name).
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. 
-func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` request, if applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` request, if applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -}
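The thread and Lambda helpers compose the same way. A hedged sketch (imports as in the earlier example; the ARN mirrors the example in the comment above, and the thread values are invented):

    import (
        semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
        "go.opentelemetry.io/otel/trace"
    )

    // annotateLambdaSpan attaches the faas/thread attributes defined above to
    // an existing span; all values are illustrative only.
    func annotateLambdaSpan(span trace.Span) {
        span.SetAttributes(
            semconv.AWSLambdaInvokedARN("arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"),
            semconv.ThreadID(42),
            semconv.ThreadName("main"),
        )
    }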
- -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), - // which uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), - // which identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), - // which contains a value describing the type of event related to the - // originating occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id), -// which uniquely identifies the event. -func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1), -// which identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type), -// which contains a value describing the type of event related to the -// originating occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim. -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -}
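A sketch of how a non-OTLP exporter path might record span status with the enum member and description helper above (imports as before; the message is invented):

    // markFailed tags a span with the otel.status_code/otel.status_description
    // attributes; the description string is illustrative only.
    func markFailed(span trace.Span) {
        span.SetAttributes(
            semconv.OTelStatusCodeError,
            semconv.OTelStatusDescription("resource not found"),
        )
    }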
- -// This semantic convention describes an instance of a function that runs -// without provisioning or managing servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It describes - // the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions.
It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") -) - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. 
-// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") -) - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. 
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. -func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. -func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. 
-func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. 
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. -func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. 
- // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md new file mode 100644 index 00000000..2de1fc3c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md @@ -0,0 +1,3 @@ +# Semconv v1.26.0 + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go new file mode 100644 index 00000000..d8dc822b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go @@ -0,0 +1,8996 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" + +import "go.opentelemetry.io/otel/attribute" + +// The Android platform on which the Android application is running. +const ( + // AndroidOSAPILevelKey is the attribute Key conforming to the + // "android.os.api_level" semantic conventions. It uniquely + // identifies the framework API revision offered by a version + // (`os.version`) of the Android operating system. More information can be + // found + // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '33', '32' + AndroidOSAPILevelKey = attribute.Key("android.os.api_level") +) + +// AndroidOSAPILevel returns an attribute KeyValue conforming to the +// "android.os.api_level" semantic conventions. It uniquely +// identifies the framework API revision offered by a version (`os.version`) of +// the Android operating system. More information can be found +// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
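+//
+// A minimal usage sketch (editor's illustrative addition, not generated
+// code; it assumes an active trace.Span named "span" obtained from
+// go.opentelemetry.io/otel/trace):
+//
+//	span.SetAttributes(AndroidOSAPILevel("33"))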
+func AndroidOSAPILevel(val string) attribute.KeyValue { + return AndroidOSAPILevelKey.String(val) +} + +// ASP.NET Core attributes +const ( + // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.result" semantic conventions. It represents + // the rate-limiting result, which shows whether the lease was acquired or + // contains a rejection reason. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Examples: 'acquired', 'request_canceled' + AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") + + // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to + // the "aspnetcore.diagnostics.handler.type" semantic conventions. It + // represents the full type name of the + // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) + // implementation that handled the exception. + // + // Type: string + // RequirementLevel: ConditionallyRequired (if and only if the exception + // was handled by this handler.) + // Stability: stable + // Examples: 'Contoso.MyHandler' + AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") + + // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming + // to the "aspnetcore.diagnostics.exception.result" semantic conventions. + // It represents the ASP.NET Core exception middleware handling result. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'handled', 'unhandled' + AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result") + + // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the + // "aspnetcore.rate_limiting.policy" semantic conventions. It represents + // the rate limiting policy name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fixed', 'sliding', 'token' + AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") + + // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the + // "aspnetcore.request.is_unhandled" semantic conventions. It represents + // the flag indicating if the request was handled by the application pipeline. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") + + // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the + // "aspnetcore.routing.is_fallback" semantic conventions. It represents a + // value that indicates whether the matched route is a fallback route. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Examples: True + AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") + + // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the + // "aspnetcore.routing.match_status" semantic conventions.
It represents + // the match result - success or failure + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'success', 'failure' + AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status") +) + +var ( + // Lease was acquired + AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") + // Lease request was rejected by the endpoint limiter + AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") + // Lease request was rejected by the global limiter + AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") + // Lease request was canceled + AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") +) + +var ( + // Exception was handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled") + // Exception was not handled by the exception handling middleware + AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled") + // Exception handling was skipped because the response had started + AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped") + // Exception handling didn't run because the request was aborted + AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted") +) + +var ( + // Match succeeded + AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success") + // Match failed + AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure") +) + +// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming +// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It +// represents the full type name of the +// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) +// implementation that handled the exception. +func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { + return AspnetcoreDiagnosticsHandlerTypeKey.String(val) +} + +// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to +// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents +// the rate limiting policy name. +func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { + return AspnetcoreRateLimitingPolicyKey.String(val) +} + +// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to +// the "aspnetcore.request.is_unhandled" semantic conventions. It represents +// the flag indicating if request was handled by the application pipeline. +func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { + return AspnetcoreRequestIsUnhandledKey.Bool(val) +} + +// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to +// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a +// value that indicates whether the matched route is a fallback route. +func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { + return AspnetcoreRoutingIsFallbackKey.Bool(val) +} + +// Generic attributes for AWS services. +const ( + // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" + // semantic conventions. 
It represents the AWS request ID as returned in + // the response headers `x-amz-request-id` or `x-amz-requestid`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes for AWS DynamoDB. +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in the + // `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") + + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. 
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") + + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") + + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. 
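+//
+// A minimal usage sketch (editor's illustrative addition, not generated
+// code; the JSON string is a hypothetical serialized update entry, and
+// "span" is an assumed active trace.Span):
+//
+//	span.SetAttributes(AWSDynamoDBGlobalSecondaryIndexUpdates(
+//		`{ "Create": { "IndexName": "name_to_group" } }`,
+//	))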
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. +func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
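+//
+// A minimal usage sketch (editor's illustrative addition; "span" is an
+// assumed active trace.Span, and 50 mirrors the documented example value):
+//
+//	span.SetAttributes(AWSDynamoDBScannedCount(50))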
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the number of +// items in the `TableNames` response parameter. +func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// Attributes for AWS Elastic Container Service (ECS). +const ( + // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id" + // semantic conventions. It represents the ID of a running ECS task. The ID + // MUST be extracted from `task.arn`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is + // populated.) + // Stability: experimental + // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b', + // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a + // running [ECS + // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b', + // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the family + // name of the [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) + // used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for the task definition used to create the ECS task. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSTaskID returns an attribute KeyValue conforming to the +// "aws.ecs.task.id" semantic conventions. It represents the ID of a running +// ECS task. The ID MUST be extracted from `task.arn`. +func AWSECSTaskID(val string) attribute.KeyValue { + return AWSECSTaskIDKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running +// [ECS +// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the family name of +// the [ECS task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) +// used to create the ECS task. 
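+//
+// A minimal usage sketch (editor's illustrative addition; it assumes the
+// resource API from go.opentelemetry.io/otel/sdk/resource and this
+// package's SchemaURL constant):
+//
+//	res := resource.NewWithAttributes(SchemaURL, AWSECSTaskFamily("opentelemetry-family"))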
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Attributes for AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Attributes for AWS Logs.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// Attributes for AWS Lambda.
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` endpoint, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for AWS S3.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-objects](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. +func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// The web browser attributes +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. 
It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// These attributes may be used to describe the client in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection). 
This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the
+ // availability zone the resource is running in. Cloud regions often have
+ // multiple, isolated locations known as zones; the availability zone
+ // identifies which of them hosts the resource.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
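+ //
+ // As an Enum attribute this is normally set from one of the predefined
+ // CloudPlatform* variables declared below rather than built by hand; a
+ // minimal illustrative sketch (attrs is a hypothetical local variable):
+ //
+ //	attrs := []attribute.KeyValue{CloudProviderAWS, CloudPlatformAWSEKS}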
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
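+ //
+ // A minimal illustrative sketch of the span-attribute fallback described
+ // above (span is assumed to be an active go.opentelemetry.io/otel/trace
+ // Span):
+ //
+ //	span.SetAttributes(CloudResourceID("arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function"))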
+ CloudResourceIDKey = attribute.Key("cloud.resource_id") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Apps + CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Bare Metal Solution (BMS) + CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + 
// IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the
+// availability zone the resource is running in. Cloud regions often have
+// multiple, isolated locations known as zones; the availability zone
+// identifies which of them hosts the resource.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// Attributes for CloudEvents.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+ // which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+ // which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+ // which contains a value describing the type of event related to the
+ // originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") + + // CodeFilepathKey is the attribute Key conforming to the "code.filepath" + // semantic conventions. It represents the source code file name that + // identifies the code unit as uniquely as possible (preferably an absolute + // file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + + // CodeFunctionKey is the attribute Key conforming to the "code.function" + // semantic conventions. It represents the method or function name, or + // equivalent (usually rightmost part of the code unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + + // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" + // semantic conventions. It represents the line number in `code.filepath` + // best representing the operation. It SHOULD point within the code unit + // named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + + // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" + // semantic conventions. It represents the "namespace" within which + // `code.function` is defined. Usually the qualified class or module name, + // such that `code.namespace` + some separator + `code.function` form a + // unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + + // CodeStacktraceKey is the attribute Key conforming to the + // "code.stacktrace" semantic conventions. It represents a stacktrace as a + // string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'at + // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + CodeStacktraceKey = attribute.Key("code.stacktrace") +) + +// CodeColumn returns an attribute KeyValue conforming to the "code.column" +// semantic conventions. It represents the column number in `code.filepath` +// best representing the operation. It SHOULD point within the code unit named +// in `code.function`. +func CodeColumn(val int) attribute.KeyValue { + return CodeColumnKey.Int(val) +} + +// CodeFilepath returns an attribute KeyValue conforming to the +// "code.filepath" semantic conventions. It represents the source code file +// name that identifies the code unit as uniquely as possible (preferably an +// absolute file path). +func CodeFilepath(val string) attribute.KeyValue { + return CodeFilepathKey.String(val) +} + +// CodeFunction returns an attribute KeyValue conforming to the +// "code.function" semantic conventions. It represents the method or function +// name, or equivalent (usually rightmost part of the code unit's name). 
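+//
+// The code.* attributes above are typically set together; a minimal
+// illustrative sketch (span is assumed to be an active
+// go.opentelemetry.io/otel/trace Span):
+//
+//	span.SetAttributes(
+//		CodeFunction("serveRequest"),
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeLineNumber(42),
+//	)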
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCPUStateKey is the attribute Key conforming to the
+ // "container.cpu.state" semantic conventions. It represents the CPU state
+ // for this data point.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'user', 'kernel'
+ ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the
+ // runtime-specific image identifier. Usually a hash algorithm followed by
+ // a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+ // Should be only the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+ // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+ ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+ // When CPU is used by the system (host OS)
+ ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+ // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+ ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the
+// runtime-specific image identifier. Usually a hash algorithm followed by a
+// UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+ // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+ // "db.client.connections.pool.name" semantic conventions. It represents
+ // the name of the connection pool; unique within the instrumented
+ // application. In case the connection pool implementation doesn't provide
+ // a name, instrumentation should use a combination of `server.address` and
+ // `server.port` attributes formatted as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+ // DBClientConnectionsStateKey is the attribute Key conforming to the
+ // "db.client.connections.state" semantic conventions. It represents the
+ // state of a connection in the pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle'
+ DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: If the collection name is parsed from the query, it SHOULD match
+ // the value provided in the query and may be qualified with the schema and
+ // database name.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+ // semantic conventions. It represents the name of the database, fully
+ // qualified within the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'test.users'
+ // Note: If a database system has multiple namespace components, they
+ // SHOULD be concatenated (potentially using database system specific
+ // conventions) from most general to most specific namespace component, and
+ // more specific namespaces SHOULD NOT be captured without the more general
+ // namespaces, to ensure that "startswith" queries for the more general
+ // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document + // what `db.namespace` means in the context of that system. + // It is RECOMMENDED to capture the value as provided by the application + // without attempting to do any case normalization. + DBNamespaceKey = attribute.Key("db.namespace") + + // DBOperationNameKey is the attribute Key conforming to the + // "db.operation.name" semantic conventions. It represents the name of the + // operation or command being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: It is RECOMMENDED to capture the value as provided by the + // application without attempting to do any case normalization. + DBOperationNameKey = attribute.Key("db.operation.name") + + // DBQueryTextKey is the attribute Key conforming to the "db.query.text" + // semantic conventions. It represents the database query being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey + // "WuValue"' + DBQueryTextKey = attribute.Key("db.query.text") + + // DBSystemKey is the attribute Key conforming to the "db.system" semantic + // conventions. It represents the database management system (DBMS) product + // as identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual DBMS may differ from the one identified by the client. + // For example, when using PostgreSQL client libraries to connect to a + // CockroachDB, the `db.system` is set to `postgresql` based on the + // instrumentation's best knowledge. + DBSystemKey = attribute.Key("db.system") +) + +var ( + // idle + DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle") + // used + DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") + // Cloud Spanner + DBSystemSpanner = DBSystemKey.String("spanner") + // Trino + DBSystemTrino = DBSystemKey.String("trino") +) + +// DBClientConnectionsPoolName returns 
an attribute KeyValue conforming to +// the "db.client.connections.pool.name" semantic conventions. It represents +// the name of the connection pool; unique within the instrumented application. +// In case the connection pool implementation doesn't provide a name, +// instrumentation should use a combination of `server.address` and +// `server.port` attributes formatted as `server.address:server.port`. +func DBClientConnectionsPoolName(val string) attribute.KeyValue { + return DBClientConnectionsPoolNameKey.String(val) +} +
+// DBCollectionName returns an attribute KeyValue conforming to the +// "db.collection.name" semantic conventions. It represents the name of a +// collection (table, container) within the database. +func DBCollectionName(val string) attribute.KeyValue { + return DBCollectionNameKey.String(val) +} +
+// DBNamespace returns an attribute KeyValue conforming to the +// "db.namespace" semantic conventions. It represents the name of the database, +// fully qualified within the server address and port. +func DBNamespace(val string) attribute.KeyValue { + return DBNamespaceKey.String(val) +} +
+// DBOperationName returns an attribute KeyValue conforming to the +// "db.operation.name" semantic conventions. It represents the name of the +// operation or command being executed. +func DBOperationName(val string) attribute.KeyValue { + return DBOperationNameKey.String(val) +} +
+// DBQueryText returns an attribute KeyValue conforming to the +// "db.query.text" semantic conventions. It represents the database query being +// executed. +func DBQueryText(val string) attribute.KeyValue { + return DBQueryTextKey.String(val) +} +
+// This group defines attributes for Cassandra. +const ( + // DBCassandraConsistencyLevelKey is the attribute Key conforming to the + // "db.cassandra.consistency_level" semantic conventions. It represents the + // consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") +
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the + // "db.cassandra.coordinator.dc" semantic conventions. It represents the + // data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the + // "db.cassandra.coordinator.id" semantic conventions. It represents the ID + // of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") +
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the + // "db.cassandra.idempotence" semantic conventions. It represents + // whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") +
+ // DBCassandraPageSizeKey is the attribute Key conforming to the + // "db.cassandra.page_size" semantic conventions. It represents the fetch + // size used for paging, i.e.
how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") +
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming + // to the "db.cassandra.speculative_execution_count" semantic conventions. + // It represents the number of times a query was speculatively executed. + // Not set or `0` if the query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") +) +
+var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) +
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} +
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of +// the coordinating node for a query. +func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} +
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the +// "db.cassandra.idempotence" semantic conventions. It represents whether +// or not the query is idempotent. +func DBCassandraIdempotence(val bool) attribute.KeyValue { + return DBCassandraIdempotenceKey.Bool(val) +} +
+// DBCassandraPageSize returns an attribute KeyValue conforming to the +// "db.cassandra.page_size" semantic conventions. It represents the fetch size +// used for paging, i.e. how many rows will be returned at once. +func DBCassandraPageSize(val int) attribute.KeyValue { + return DBCassandraPageSizeKey.Int(val) +} +
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue +// conforming to the "db.cassandra.speculative_execution_count" semantic +// conventions. It represents the number of times a query was speculatively +// executed. Not set or `0` if the query was not executed speculatively. +func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { + return DBCassandraSpeculativeExecutionCountKey.Int(val) +}
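The helpers above are thin wrappers that turn a value into an attribute.KeyValue, so they compose directly with the OpenTelemetry trace API. Below is a minimal sketch of annotating a Cassandra client span with the generic db.* helpers and the Cassandra-specific ones; the semconv import path (v1.26.0 here) and the tracer name are assumptions for illustration, not part of this change.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path is an assumption
	"go.opentelemetry.io/otel/trace"
)

// queryUsers starts a client span carrying the db.* and db.cassandra.*
// attributes defined above; names and values are illustrative only.
func queryUsers(ctx context.Context) {
	_, span := otel.Tracer("cassandra-example").Start(ctx, "SELECT my_keyspace.users",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemCassandra,
			semconv.DBNamespace("my_keyspace"),
			semconv.DBCollectionName("users"),
			semconv.DBOperationName("SELECT"),
			semconv.DBCassandraConsistencyLevelLocalQuorum,
			semconv.DBCassandraPageSize(5000),
			semconv.DBCassandraIdempotence(true),
		))
	defer span.End()
}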
+// This group defines attributes for Azure Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. It represents the unique + // Cosmos client instance id. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' + DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") +
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the + // "db.cosmosdb.connection_mode" semantic conventions. It represents the + // Cosmos client connection mode. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") +
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the + // "db.cosmosdb.operation_type" semantic conventions. It represents the + // Cosmos DB operation type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") +
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the + // "db.cosmosdb.request_charge" semantic conventions. It represents the RU + // consumed for that operation. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 46.18, 1.0 + DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") +
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the + // "db.cosmosdb.request_content_length" semantic conventions. It represents + // the request payload size in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") +
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos + // DB status code. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 200, 201 + DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") +
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the + // "db.cosmosdb.sub_status_code" semantic conventions. It represents the + // Cosmos DB sub-status code.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000, 1002 + DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") +) +
+var ( + // Gateway (HTTP) connections mode + DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") + // Direct connection + DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") +) +
+var ( + // invalid + DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") + // create + DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") + // patch + DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch") + // read + DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") + // read_feed + DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") + // delete + DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") + // replace + DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") + // execute + DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") + // query + DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") + // head + DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") + // head_feed + DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") + // upsert + DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") + // batch + DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") + // query_plan + DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") + // execute_javascript + DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") +) +
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the +// "db.cosmosdb.client_id" semantic conventions. It represents the unique +// Cosmos client instance id. +func DBCosmosDBClientID(val string) attribute.KeyValue { + return DBCosmosDBClientIDKey.String(val) +} +
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the +// "db.cosmosdb.request_charge" semantic conventions. It represents the RU +// consumed for that operation. +func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { + return DBCosmosDBRequestChargeKey.Float64(val) +} +
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming +// to the "db.cosmosdb.request_content_length" semantic conventions. It +// represents the request payload size in bytes. +func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { + return DBCosmosDBRequestContentLengthKey.Int(val) +} +
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB +// status code. +func DBCosmosDBStatusCode(val int) attribute.KeyValue { + return DBCosmosDBStatusCodeKey.Int(val) +} +
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the +// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos +// DB sub-status code. +func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { + return DBCosmosDBSubStatusCodeKey.Int(val) +}
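The Cosmos DB helpers follow the same pattern; a sketch of recording per-request values on an already-started span is below. The helper name, the literal values, and the semconv import path are hypothetical.

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path is an assumption
	"go.opentelemetry.io/otel/trace"
)

// annotateCosmosRequest is a hypothetical helper that attaches the Cosmos DB
// attributes defined above to a client span once a response arrives.
func annotateCosmosRequest(span trace.Span, requestCharge float64, status, subStatus int) {
	span.SetAttributes(
		semconv.DBSystemCosmosDB,
		semconv.DBCosmosDBConnectionModeGateway,
		semconv.DBCosmosDBOperationTypeQuery,
		semconv.DBCosmosDBRequestCharge(requestCharge),
		semconv.DBCosmosDBStatusCode(status),
		semconv.DBCosmosDBSubStatusCode(subStatus),
	)
}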
+// This group defines attributes for Elasticsearch. +const ( + // DBElasticsearchClusterNameKey is the attribute Key conforming to the + // "db.elasticsearch.cluster.name" semantic conventions. It represents the + // identifier of an Elasticsearch cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' + DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") +
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the + // "db.elasticsearch.node.name" semantic conventions. It represents the + // human-readable identifier of the node/instance to which a + // request was routed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-0000000001' + DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") +) +
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to +// the "db.elasticsearch.cluster.name" semantic conventions. It represents the +// identifier of an Elasticsearch cluster. +func DBElasticsearchClusterName(val string) attribute.KeyValue { + return DBElasticsearchClusterNameKey.String(val) +} +
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the +// "db.elasticsearch.node.name" semantic conventions. It represents the +// human-readable identifier of the node/instance to which a +// request was routed. +func DBElasticsearchNodeName(val string) attribute.KeyValue { + return DBElasticsearchNodeNameKey.String(val) +} +
+// Attributes for software deployments. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'staging', 'production' + // Note: `deployment.environment` does not affect the uniqueness + // constraints defined through + // the `service.namespace`, `service.name` and `service.instance.id` + // resource attributes. + // This implies that resources carrying the following attribute + // combinations MUST be + // considered to be identifying the same service: + // + // * `service.name=frontend`, `deployment.environment=production` + // * `service.name=frontend`, `deployment.environment=staging`. + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) +
+// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) +// (aka deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +}
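Because `deployment.environment` does not factor into service identity, it is typically set once on the SDK resource rather than per span. A sketch follows, assuming the application also uses the OpenTelemetry SDK resource package; the service name and environment value are illustrative.

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path is an assumption
)

// newResource labels the process with a deployment tier; per the note above,
// "frontend" in staging and in production identify the same service.
func newResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("frontend"), // hypothetical service name
		semconv.DeploymentEnvironment("staging"),
	)
}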
+// Attributes that represent an occurrence of a lifecycle transition on the +// Android platform. +const ( + // AndroidStateKey is the attribute Key conforming to the "android.state" + // semantic conventions. It is deprecated; use the `device.app.lifecycle` + // event definition, including `android.state` as a payload field, instead. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The Android lifecycle states are defined in [Activity lifecycle + // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), + // from which the `OS identifiers` are derived. + AndroidStateKey = attribute.Key("android.state") +) +
+var ( + // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time + AndroidStateCreated = AndroidStateKey.String("created") + // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state + AndroidStateBackground = AndroidStateKey.String("background") + // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states + AndroidStateForeground = AndroidStateKey.String("foreground") +) +
+// These attributes may be used to describe the receiver of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // DestinationAddressKey is the attribute Key conforming to the + // "destination.address" semantic conventions. It represents the + // destination address - domain name if available without reverse DNS + // lookup; otherwise, IP address or Unix domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the source side, and when communicating through + // an intermediary, `destination.address` SHOULD represent the destination + // address behind any intermediaries, for example proxies, if it's + // available. + DestinationAddressKey = attribute.Key("destination.address") +
+ // DestinationPortKey is the attribute Key conforming to the + // "destination.port" semantic conventions. It represents the destination + // port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + DestinationPortKey = attribute.Key("destination.port") +) +
+// DestinationAddress returns an attribute KeyValue conforming to the +// "destination.address" semantic conventions. It represents the destination +// address - domain name if available without reverse DNS lookup; otherwise, IP +// address or Unix domain socket name. +func DestinationAddress(val string) attribute.KeyValue { + return DestinationAddressKey.String(val) +} +
+// DestinationPort returns an attribute KeyValue conforming to the +// "destination.port" semantic conventions. It represents the destination port +// number. +func DestinationPort(val int) attribute.KeyValue { + return DestinationPortKey.Int(val) +} +
+// Describes device attributes. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such.
On iOS (Swift or Objective-C), this value MUST be equal + // to the [vendor + // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). + // On Android (Java or Kotlin), this value MUST be equal to the Firebase + // Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on + // best practices and exact implementation details. Caution should be taken + // when storing personal data or anything which can identify a user. GDPR + // and data protection laws may apply; ensure you do your own due + // diligence. + DeviceIDKey = attribute.Key("device.id") +
+ // DeviceManufacturerKey is the attribute Key conforming to the + // "device.manufacturer" semantic conventions. It represents the name of + // the device manufacturer. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +
+ // DeviceModelIdentifierKey is the attribute Key conforming to the + // "device.model.identifier" semantic conventions. It represents the model + // identifier for the device. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine-readable version + // of the model identifier rather than the market or consumer-friendly name + // of the device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") +
+ // DeviceModelNameKey is the attribute Key conforming to the + // "device.model.name" semantic conventions. It represents the marketing + // name for the device model. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human-readable version of + // the device model rather than a machine-readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) +
+// DeviceID returns an attribute KeyValue conforming to the "device.id" +// semantic conventions. It represents a unique identifier representing the +// device. +func DeviceID(val string) attribute.KeyValue { + return DeviceIDKey.String(val) +} +
+// DeviceManufacturer returns an attribute KeyValue conforming to the +// "device.manufacturer" semantic conventions. It represents the name of the +// device manufacturer. +func DeviceManufacturer(val string) attribute.KeyValue { + return DeviceManufacturerKey.String(val) +} +
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the +// "device.model.identifier" semantic conventions. It represents the model +// identifier for the device. +func DeviceModelIdentifier(val string) attribute.KeyValue { + return DeviceModelIdentifierKey.String(val) +} +
+// DeviceModelName returns an attribute KeyValue conforming to the +// "device.model.name" semantic conventions. It represents the marketing name +// for the device model. +func DeviceModelName(val string) attribute.KeyValue { + return DeviceModelNameKey.String(val) +}
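The device.* attributes are likewise resource-level; the following sketch for a mobile client reuses the example values from the comments above, and the import path is an assumption as before.

package main

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path is an assumption
)

// deviceResource describes the host device; the values mirror the examples
// above and are illustrative only.
func deviceResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.DeviceID("2ab2916d-a51f-4ac8-80ee-45ac31a28092"),
		semconv.DeviceManufacturer("Samsung"),
		semconv.DeviceModelIdentifier("SM-G920F"),
		semconv.DeviceModelName("Samsung Galaxy S6"),
	)
}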
+// These attributes may be used for any disk-related operation. +const ( + // DiskIoDirectionKey is the attribute Key conforming to the + // "disk.io.direction" semantic conventions. It represents the disk IO + // operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read' + DiskIoDirectionKey = attribute.Key("disk.io.direction") +) +
+var ( + // read + DiskIoDirectionRead = DiskIoDirectionKey.String("read") + // write + DiskIoDirectionWrite = DiskIoDirectionKey.String("write") +) +
+// The shared attributes used to report a DNS query. +const ( + // DNSQuestionNameKey is the attribute Key conforming to the + // "dns.question.name" semantic conventions. It represents the name being + // queried. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.example.com', 'opentelemetry.io' + // Note: If the name field contains non-printable characters (below 32 or + // above 126), those characters should be represented as escaped base 10 + // integers (\DDD). Backslashes and quotes should be escaped. Tabs, + // carriage returns, and line feeds should be converted to \t, \r, and \n + // respectively. + DNSQuestionNameKey = attribute.Key("dns.question.name") +) +
+// DNSQuestionName returns an attribute KeyValue conforming to the +// "dns.question.name" semantic conventions. It represents the name being +// queried. +func DNSQuestionName(val string) attribute.KeyValue { + return DNSQuestionNameKey.String(val) +} +
+// Attributes for operations with an authenticated and/or authorized enduser. +const ( + // EnduserIDKey is the attribute Key conforming to the "enduser.id" + // semantic conventions. It represents the username or client_id extracted + // from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header + // in the inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") +
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role" + // semantic conventions. It represents the actual/assumed role the client + // is making the request under extracted from token or application security + // context. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") +
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" + // semantic conventions. It represents the scopes or granted authorities + // the client currently possesses extracted from token or application + // security context. The value would come from the scope associated with an + // [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute + // value in a [SAML 2.0 + // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) +
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id" +// semantic conventions. It represents the username or client_id extracted from +// the access token or +// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in +// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue { + return EnduserIDKey.String(val) +} +
+// EnduserRole returns an attribute KeyValue conforming to the +// "enduser.role" semantic conventions. It represents the actual/assumed role +// the client is making the request under extracted from token or application +// security context. +func EnduserRole(val string) attribute.KeyValue { + return EnduserRoleKey.String(val) +} +
+// EnduserScope returns an attribute KeyValue conforming to the +// "enduser.scope" semantic conventions. It represents the scopes or granted +// authorities the client currently possesses extracted from token or +// application security context. The value would come from the scope associated +// with an [OAuth 2.0 Access +// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute +// value in a [SAML 2.0 +// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). +func EnduserScope(val string) attribute.KeyValue { + return EnduserScopeKey.String(val) +} +
+// The shared attributes used to report an error. +const ( + // ErrorTypeKey is the attribute Key conforming to the "error.type" + // semantic conventions. It describes a class of error the + // operation ended with. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'timeout', 'java.net.UnknownHostException', + // 'server_certificate_invalid', '500' + // Note: The `error.type` SHOULD be predictable, and SHOULD have low + // cardinality. + // + // When `error.type` is set to a type (e.g., an exception type), its + // canonical class name identifying the type within the artifact SHOULD be + // used. + // + // Instrumentations SHOULD document the list of errors they report. + // + // The cardinality of `error.type` within one instrumentation library + // SHOULD be low. + // Telemetry consumers that aggregate data from multiple instrumentation + // libraries and applications + // should be prepared for `error.type` to have high cardinality at query + // time when no + // additional filters are applied. + // + // If the operation has completed successfully, instrumentations SHOULD NOT + // set `error.type`. + // + // If a specific domain defines its own set of error identifiers (such as + // HTTP or gRPC status codes), + // it's RECOMMENDED to: + // + // * Use a domain-specific attribute + // * Set `error.type` to capture all errors, regardless of whether they are + // defined within the domain-specific set or not. + ErrorTypeKey = attribute.Key("error.type") +) +
+var ( + // A fallback error value to be used when the instrumentation doesn't define a custom value + ErrorTypeOther = ErrorTypeKey.String("_OTHER") +)
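The error.type guidance above (predictable, low-cardinality values, _OTHER as the fallback, never set on success) translates into a small classification step at recording time. The mapping below is a sketch, not a prescribed taxonomy; the import path and the "timeout" bucket are assumptions.

package main

import (
	"context"
	"errors"

	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path is an assumption
	"go.opentelemetry.io/otel/trace"
)

// setErrorType maps a Go error to a low-cardinality error.type value,
// falling back to the _OTHER sentinel defined above.
func setErrorType(span trace.Span, err error) {
	switch {
	case err == nil:
		// Success: error.type SHOULD NOT be set.
	case errors.Is(err, context.DeadlineExceeded):
		span.SetAttributes(semconv.ErrorTypeKey.String("timeout"))
	default:
		span.SetAttributes(semconv.ErrorTypeOther)
	}
}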
+// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It identifies the class / type of + // event. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'browser.mouse.click', 'device.app.lifecycle' + // Note: Event names are subject to the same rules as [attribute + // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md). + // Notably, event names are namespaced to avoid collisions and provide a + // clean separation of semantics for events in separate domains like + // browser, mobile, and kubernetes. + EventNameKey = attribute.Key("event.name") +) +
+// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It identifies the class / type of +// event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} +
+// The shared attributes used to report a single exception associated with a +// span or log. +const ( + // ExceptionEscapedKey is the attribute Key conforming to the + // "exception.escaped" semantic conventions. It SHOULD be + // set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of + // a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most + // languages. + // + // It is usually not possible to determine at the point where an exception + // is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending + // the span, + // as done in the [example for recording span + // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception). + // + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +
+ // ExceptionMessageKey is the attribute Key conforming to the + // "exception.message" semantic conventions. It represents the exception + // message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str + // implicitly" + ExceptionMessageKey = attribute.Key("exception.message") +
+ // ExceptionStacktraceKey is the attribute Key conforming to the + // "exception.stacktrace" semantic conventions. It represents a stacktrace + // as a string in the natural representation for the language runtime. The + // representation is to be determined and documented by each language SIG. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type" + // semantic conventions. It represents the type of the exception (its + // fully-qualified class name, if applicable). The dynamic type of the + // exception should be preferred over the static type in languages that + // support it.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") +) +
+// ExceptionEscaped returns an attribute KeyValue conforming to the +// "exception.escaped" semantic conventions. It SHOULD be set to +// true if the exception event is recorded at a point where it is known that +// the exception is escaping the scope of the span. +func ExceptionEscaped(val bool) attribute.KeyValue { + return ExceptionEscapedKey.Bool(val) +} +
+// ExceptionMessage returns an attribute KeyValue conforming to the +// "exception.message" semantic conventions. It represents the exception +// message. +func ExceptionMessage(val string) attribute.KeyValue { + return ExceptionMessageKey.String(val) +} +
+// ExceptionStacktrace returns an attribute KeyValue conforming to the +// "exception.stacktrace" semantic conventions. It represents a stacktrace as a +// string in the natural representation for the language runtime. The +// representation is to be determined and documented by each language SIG. +func ExceptionStacktrace(val string) attribute.KeyValue { + return ExceptionStacktraceKey.String(val) +} +
+// ExceptionType returns an attribute KeyValue conforming to the +// "exception.type" semantic conventions. It represents the type of the +// exception (its fully-qualified class name, if applicable). The dynamic type +// of the exception should be preferred over the static type in languages that +// support it. +func ExceptionType(val string) attribute.KeyValue { + return ExceptionTypeKey.String(val) +}
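The exception.escaped note boils down to: check for a still-pending error immediately before ending the span, and record it with the escaped flag. A minimal sketch, assuming the same semconv import path as above:

package main

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path is an assumption
	"go.opentelemetry.io/otel/trace"
)

// endWith records an error that is still "in flight" as the span ends and
// marks it as escaping the span's scope.
func endWith(span trace.Span, err error) {
	if err != nil {
		span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscaped(true)))
	}
	span.End()
}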
+// FaaS attributes +const ( + // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" + // semantic conventions. It represents a boolean that is true if the + // serverless function is executed for the first time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + FaaSColdstartKey = attribute.Key("faas.coldstart") +
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic + // conventions. It represents a string containing the schedule period as + // [Cron + // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the + // "faas.document.collection" semantic conventions. It represents the name + // of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in + // Cosmos DB to the database name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") +
+ // FaaSDocumentNameKey is the attribute Key conforming to the + // "faas.document.name" semantic conventions. It represents the document + // name/table subjected to the operation. For example, in Cloud Storage or + // S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +
+ // FaaSDocumentOperationKey is the attribute Key conforming to the + // "faas.document.operation" semantic conventions. It describes + // the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") +
+ // FaaSDocumentTimeKey is the attribute Key conforming to the + // "faas.document.time" semantic conventions. It represents a string + // containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") +
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" + // semantic conventions. It represents the execution environment ID as a + // string, that will be potentially reused for other invocations to the + // same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") +
+ // FaaSInvocationIDKey is the attribute Key conforming to the + // "faas.invocation_id" semantic conventions. It represents the invocation + // ID of the current function invocation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSInvocationIDKey = attribute.Key("faas.invocation_id") +
+ // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") +
+ // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") +
+ // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +
+ // FaaSMaxMemoryKey is the attribute Key conforming to the + // "faas.max_memory" semantic conventions. It represents the amount of + // memory available to the serverless function converted to Bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 134217728 + // Note: It's recommended to set this attribute since e.g. too little + // memory can easily stop a Java AWS Lambda function from working + // correctly. On AWS Lambda, the environment variable + // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must + // be multiplied by 1,048,576). + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic + // conventions. It represents the name of the single function that this + // runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the + // FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) + // span attributes). + // + // For some cloud providers, the above definition is ambiguous. The + // following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud + // providers/products: + // + // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `cloud.resource_id` attribute). + FaaSNameKey = attribute.Key("faas.name") +
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic + // conventions. It represents a string containing the function invocation + // time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format + // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") +
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" + // semantic conventions. It represents the type of the trigger which caused + // this function invocation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + FaaSTriggerKey = attribute.Key("faas.trigger") +
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version" + // semantic conventions. It represents the immutable version of the + // function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + // + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run (Services):** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable.
Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// FaaSColdstart returns an attribute KeyValue conforming to the +// "faas.coldstart" semantic conventions. It represents a boolean that is true +// if the serverless function is executed for the first time (aka cold-start). +func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" +// semantic conventions. It represents a string containing the schedule period +// as [Cron +// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). +func FaaSCron(val string) attribute.KeyValue { + return FaaSCronKey.String(val) +} + +// FaaSDocumentCollection returns an attribute KeyValue conforming to the +// "faas.document.collection" semantic conventions. It represents the name of +// the source on which the triggering operation was performed. For example, in +// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the +// database name. +func FaaSDocumentCollection(val string) attribute.KeyValue { + return FaaSDocumentCollectionKey.String(val) +} + +// FaaSDocumentName returns an attribute KeyValue conforming to the +// "faas.document.name" semantic conventions. It represents the document +// name/table subjected to the operation. For example, in Cloud Storage or S3 +// is the name of the file, and in Cosmos DB the table name. +func FaaSDocumentName(val string) attribute.KeyValue { + return FaaSDocumentNameKey.String(val) +} + +// FaaSDocumentTime returns an attribute KeyValue conforming to the +// "faas.document.time" semantic conventions. It represents a string containing +// the time when the data was accessed in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSDocumentTime(val string) attribute.KeyValue { + return FaaSDocumentTimeKey.String(val) +} + +// FaaSInstance returns an attribute KeyValue conforming to the +// "faas.instance" semantic conventions. 
It represents the execution +// environment ID as a string that will potentially be reused for other +// invocations to the same function/function version. +func FaaSInstance(val string) attribute.KeyValue { + return FaaSInstanceKey.String(val) +} +
+// FaaSInvocationID returns an attribute KeyValue conforming to the +// "faas.invocation_id" semantic conventions. It represents the invocation ID +// of the current function invocation. +func FaaSInvocationID(val string) attribute.KeyValue { + return FaaSInvocationIDKey.String(val) +} +
+// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} +
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} +
+// FaaSMaxMemory returns an attribute KeyValue conforming to the +// "faas.max_memory" semantic conventions. It represents the amount of memory +// available to the serverless function converted to Bytes. +func FaaSMaxMemory(val int) attribute.KeyValue { + return FaaSMaxMemoryKey.Int(val) +} +
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" +// semantic conventions. It represents the name of the single function that +// this runtime instance executes. +func FaaSName(val string) attribute.KeyValue { + return FaaSNameKey.String(val) +} +
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" +// semantic conventions. It represents a string containing the function +// invocation time in the [ISO +// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format +// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). +func FaaSTime(val string) attribute.KeyValue { + return FaaSTimeKey.String(val) +} +
+// FaaSVersion returns an attribute KeyValue conforming to the +// "faas.version" semantic conventions. It represents the immutable version of +// the function being executed. +func FaaSVersion(val string) attribute.KeyValue { + return FaaSVersionKey.String(val) +}
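For a function invocation, the trigger, cold-start flag, and invocation ID are usually attached when the handler span starts. The following sketch assumes an HTTP-triggered function; the tracer name, span name, and import path are illustrative assumptions.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // version path is an assumption
	"go.opentelemetry.io/otel/trace"
)

// handleInvocation starts a server-side span for one function invocation.
func handleInvocation(ctx context.Context, invocationID string, coldStart bool) {
	_, span := otel.Tracer("faas-example").Start(ctx, "my-function",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.FaaSTriggerHTTP,
			semconv.FaaSColdstart(coldStart),
			semconv.FaaSInvocationID(invocationID),
		))
	defer span.End()
}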
+// Attributes for Feature Flags. +const ( + // FeatureFlagKeyKey is the attribute Key conforming to the + // "feature_flag.key" semantic conventions. It represents the unique + // identifier of the feature flag. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'logo-color' + FeatureFlagKeyKey = attribute.Key("feature_flag.key") +
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the + // "feature_flag.provider_name" semantic conventions. It represents the + // name of the service provider that performs the flag evaluation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Flag Manager' + FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") +
+ // FeatureFlagVariantKey is the attribute Key conforming to the + // "feature_flag.variant" semantic conventions. It SHOULD be + // a semantic identifier for a value. If one is unavailable, a stringified + // version of the value can be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'red', 'true', 'on' + // Note: A semantic identifier, commonly referred to as a variant, provides + // a means + // for referring to a value without including the value itself. This can + // provide additional context for understanding the meaning behind a value. + // For example, the variant `red` may be used for the value `#c05543`. + // + // A stringified version of the value can be used in situations where a + // semantic identifier is unavailable. String representation of the value + // should be determined by the implementer. + FeatureFlagVariantKey = attribute.Key("feature_flag.variant") +) +
+// FeatureFlagKey returns an attribute KeyValue conforming to the +// "feature_flag.key" semantic conventions. It represents the unique identifier +// of the feature flag. +func FeatureFlagKey(val string) attribute.KeyValue { + return FeatureFlagKeyKey.String(val) +} +
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the +// "feature_flag.provider_name" semantic conventions. It represents the name of +// the service provider that performs the flag evaluation. +func FeatureFlagProviderName(val string) attribute.KeyValue { + return FeatureFlagProviderNameKey.String(val) +} +
+// FeatureFlagVariant returns an attribute KeyValue conforming to the +// "feature_flag.variant" semantic conventions. It SHOULD be a +// semantic identifier for a value. If one is unavailable, a stringified +// version of the value can be used. +func FeatureFlagVariant(val string) attribute.KeyValue { + return FeatureFlagVariantKey.String(val) +} +
+// Describes file attributes. +const ( + // FileDirectoryKey is the attribute Key conforming to the "file.directory" + // semantic conventions. It represents the directory where the file is + // located. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/user', 'C:\\Program Files\\MyApp' + FileDirectoryKey = attribute.Key("file.directory") +
+ // FileExtensionKey is the attribute Key conforming to the "file.extension" + // semantic conventions. It represents the file extension, excluding the + // leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: When the file name has multiple extensions (example.tar.gz), only + // the last one should be captured ("gz", not "tar.gz"). + FileExtensionKey = attribute.Key("file.extension") +
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic + // conventions. It represents the name of the file including the extension, + // without the directory. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'example.png' + FileNameKey = attribute.Key("file.name") +
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic + // conventions. It represents the full path to the file, including the file + // name. It should include the drive letter, when appropriate. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/home/alice/example.png', 'C:\\Program + // Files\\MyApp\\myapp.exe' + FilePathKey = attribute.Key("file.path") +
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic + // conventions. It represents the file size in bytes.
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + FileSizeKey = attribute.Key("file.size") +) + +// FileDirectory returns an attribute KeyValue conforming to the +// "file.directory" semantic conventions. It represents the directory where the +// file is located. It should include the drive letter, when appropriate. +func FileDirectory(val string) attribute.KeyValue { + return FileDirectoryKey.String(val) +} + +// FileExtension returns an attribute KeyValue conforming to the +// "file.extension" semantic conventions. It represents the file extension, +// excluding the leading dot. +func FileExtension(val string) attribute.KeyValue { + return FileExtensionKey.String(val) +} + +// FileName returns an attribute KeyValue conforming to the "file.name" +// semantic conventions. It represents the name of the file including the +// extension, without the directory. +func FileName(val string) attribute.KeyValue { + return FileNameKey.String(val) +} + +// FilePath returns an attribute KeyValue conforming to the "file.path" +// semantic conventions. It represents the full path to the file, including the +// file name. It should include the drive letter, when appropriate. +func FilePath(val string) attribute.KeyValue { + return FilePathKey.String(val) +} + +// FileSize returns an attribute KeyValue conforming to the "file.size" +// semantic conventions. It represents the file size in bytes. +func FileSize(val int) attribute.KeyValue { + return FileSizeKey.Int(val) +} + +// Attributes for Google Cloud Run. +const ( + // GCPCloudRunJobExecutionKey is the attribute Key conforming to the + // "gcp.cloud_run.job.execution" semantic conventions. It represents the + // name of the Cloud Run + // [execution](https://cloud.google.com/run/docs/managing/job-executions) + // being run for the Job, as set by the + // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'job-name-xxxx', 'sample-job-mdw84' + GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") + + // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the + // "gcp.cloud_run.job.task_index" semantic conventions. It represents the + // index for a task within an execution as provided by the + // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) + // environment variable. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1 + GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") +) + +// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.execution" semantic conventions. It represents the name +// of the Cloud Run +// [execution](https://cloud.google.com/run/docs/managing/job-executions) being +// run for the Job, as set by the +// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobExecution(val string) attribute.KeyValue { + return GCPCloudRunJobExecutionKey.String(val) +} + +// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the +// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index +// for a task within an execution as provided by the +// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) +// environment variable. +func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { + return GCPCloudRunJobTaskIndexKey.Int(val) +} + +// Attributes for Google Compute Engine (GCE). +const ( + // GCPGceInstanceHostnameKey is the attribute Key conforming to the + // "gcp.gce.instance.hostname" semantic conventions. It represents the + // hostname of a GCE instance. This is the full value of the default or + // [custom + // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-host1234.example.com', + // 'sample-vm.us-west1-b.c.my-project.internal' + GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") + + // GCPGceInstanceNameKey is the attribute Key conforming to the + // "gcp.gce.instance.name" semantic conventions. It represents the instance + // name of a GCE instance. This is the value provided by `host.name`, the + // visible name of the instance in the Cloud Console UI, and the prefix for + // the default hostname of the instance as defined by the [default internal + // DNS + // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'instance-1', 'my-vm-name' + GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") +) + +// GCPGceInstanceHostname returns an attribute KeyValue conforming to the +// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname +// of a GCE instance. This is the full value of the default or [custom +// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). +func GCPGceInstanceHostname(val string) attribute.KeyValue { + return GCPGceInstanceHostnameKey.String(val) +} + +// GCPGceInstanceName returns an attribute KeyValue conforming to the +// "gcp.gce.instance.name" semantic conventions. It represents the instance +// name of a GCE instance. This is the value provided by `host.name`, the +// visible name of the instance in the Cloud Console UI, and the prefix for the +// default hostname of the instance as defined by the [default internal DNS +// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). +func GCPGceInstanceName(val string) attribute.KeyValue { + return GCPGceInstanceNameKey.String(val) +} + +// The attributes used to describe telemetry in the context of LLM (Large +// Language Models) requests and responses. +const ( + // GenAiCompletionKey is the attribute Key conforming to the + // "gen_ai.completion" semantic conventions. It represents the full + // response received from the LLM. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'assistant', 'content': 'The capital of France is + // Paris.'}]" + // Note: It's RECOMMENDED to format completions as JSON string matching + // [OpenAI messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiCompletionKey = attribute.Key("gen_ai.completion") + + // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt" + // semantic conventions. It represents the full prompt sent to an LLM. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: "[{'role': 'user', 'content': 'What is the capital of + // France?'}]" + // Note: It's RECOMMENDED to format prompts as JSON string matching [OpenAI + // messages + // format](https://platform.openai.com/docs/guides/text-generation) + GenAiPromptKey = attribute.Key("gen_ai.prompt") + + // GenAiRequestMaxTokensKey is the attribute Key conforming to the + // "gen_ai.request.max_tokens" semantic conventions. It represents the + // maximum number of tokens the LLM generates for a request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens") + + // GenAiRequestModelKey is the attribute Key conforming to the + // "gen_ai.request.model" semantic conventions. It represents the name of + // the LLM a request is being made to. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4' + GenAiRequestModelKey = attribute.Key("gen_ai.request.model") + + // GenAiRequestTemperatureKey is the attribute Key conforming to the + // "gen_ai.request.temperature" semantic conventions. It represents the + // temperature setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0.0 + GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature") + + // GenAiRequestTopPKey is the attribute Key conforming to the + // "gen_ai.request.top_p" semantic conventions. It represents the top_p + // sampling setting for the LLM request. + // + // Type: double + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1.0 + GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p") + + // GenAiResponseFinishReasonsKey is the attribute Key conforming to the + // "gen_ai.response.finish_reasons" semantic conventions. It represents the + // array of reasons the model stopped generating tokens, corresponding to + // each generation received. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'stop' + GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons") + + // GenAiResponseIDKey is the attribute Key conforming to the + // "gen_ai.response.id" semantic conventions. It represents the unique + // identifier for the completion. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'chatcmpl-123' + GenAiResponseIDKey = attribute.Key("gen_ai.response.id") + + // GenAiResponseModelKey is the attribute Key conforming to the + // "gen_ai.response.model" semantic conventions. It represents the name of + // the LLM a response was generated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'gpt-4-0613' + GenAiResponseModelKey = attribute.Key("gen_ai.response.model") + + // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system" + // semantic conventions. It represents the Generative AI product as + // identified by the client instrumentation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'openai' + // Note: The actual GenAI product may differ from the one identified by the + // client. 
For example, when using OpenAI client libraries to communicate + // with Mistral, the `gen_ai.system` is set to `openai` based on the + // instrumentation's best knowledge. + GenAiSystemKey = attribute.Key("gen_ai.system") + + // GenAiUsageCompletionTokensKey is the attribute Key conforming to the + // "gen_ai.usage.completion_tokens" semantic conventions. It represents the + // number of tokens used in the LLM response (completion). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 180 + GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens") + + // GenAiUsagePromptTokensKey is the attribute Key conforming to the + // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the + // number of tokens used in the LLM prompt. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 100 + GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens") +) + +var ( + // OpenAI + GenAiSystemOpenai = GenAiSystemKey.String("openai") +) + +// GenAiCompletion returns an attribute KeyValue conforming to the +// "gen_ai.completion" semantic conventions. It represents the full response +// received from the LLM. +func GenAiCompletion(val string) attribute.KeyValue { + return GenAiCompletionKey.String(val) +} + +// GenAiPrompt returns an attribute KeyValue conforming to the +// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to +// an LLM. +func GenAiPrompt(val string) attribute.KeyValue { + return GenAiPromptKey.String(val) +} + +// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the +// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum +// number of tokens the LLM generates for a request. +func GenAiRequestMaxTokens(val int) attribute.KeyValue { + return GenAiRequestMaxTokensKey.Int(val) +} + +// GenAiRequestModel returns an attribute KeyValue conforming to the +// "gen_ai.request.model" semantic conventions. It represents the name of the +// LLM a request is being made to. +func GenAiRequestModel(val string) attribute.KeyValue { + return GenAiRequestModelKey.String(val) +} + +// GenAiRequestTemperature returns an attribute KeyValue conforming to the +// "gen_ai.request.temperature" semantic conventions. It represents the +// temperature setting for the LLM request. +func GenAiRequestTemperature(val float64) attribute.KeyValue { + return GenAiRequestTemperatureKey.Float64(val) +} + +// GenAiRequestTopP returns an attribute KeyValue conforming to the +// "gen_ai.request.top_p" semantic conventions. It represents the top_p +// sampling setting for the LLM request. +func GenAiRequestTopP(val float64) attribute.KeyValue { + return GenAiRequestTopPKey.Float64(val) +} + +// GenAiResponseFinishReasons returns an attribute KeyValue conforming to +// the "gen_ai.response.finish_reasons" semantic conventions. It represents the +// array of reasons the model stopped generating tokens, corresponding to each +// generation received. +func GenAiResponseFinishReasons(val ...string) attribute.KeyValue { + return GenAiResponseFinishReasonsKey.StringSlice(val) +} + +// GenAiResponseID returns an attribute KeyValue conforming to the +// "gen_ai.response.id" semantic conventions. It represents the unique +// identifier for the completion. 
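+//
+// Illustrative usage (editor's sketch, not part of the upstream
+// generated file): gen_ai.* attributes are typically set on the span
+// that wraps a model call. The span value below is assumed to be an
+// already-started trace.Span:
+//
+//	// Values are taken from the Examples listed above.
+//	span.SetAttributes(
+//		GenAiSystemOpenai,
+//		GenAiRequestModel("gpt-4"),
+//		GenAiRequestMaxTokens(100),
+//		GenAiResponseID("chatcmpl-123"),
+//	)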
+func GenAiResponseID(val string) attribute.KeyValue { + return GenAiResponseIDKey.String(val) +} + +// GenAiResponseModel returns an attribute KeyValue conforming to the +// "gen_ai.response.model" semantic conventions. It represents the name of the +// LLM a response was generated from. +func GenAiResponseModel(val string) attribute.KeyValue { + return GenAiResponseModelKey.String(val) +} + +// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to +// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the +// number of tokens used in the LLM response (completion). +func GenAiUsageCompletionTokens(val int) attribute.KeyValue { + return GenAiUsageCompletionTokensKey.Int(val) +} + +// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the +// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number +// of tokens used in the LLM prompt. +func GenAiUsagePromptTokens(val int) attribute.KeyValue { + return GenAiUsagePromptTokensKey.Int(val) +} + +// Attributes for GraphQL. +const ( + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") + + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// Attributes for the Heroku platform on which the application is +// running. +const ( + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions.
It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") +) + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// A host is defined as a computing instance. For example, physical servers, +// virtual machines, switches or disk array. +const ( + // HostArchKey is the attribute Key conforming to the "host.arch" semantic + // conventions. It represents the CPU architecture the host system is + // running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + HostArchKey = attribute.Key("host.arch") + + // HostCPUCacheL2SizeKey is the attribute Key conforming to the + // "host.cpu.cache.l2.size" semantic conventions. It represents the amount + // of level 2 memory cache available to the processor (in Bytes). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12288000 + HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") + + // HostCPUFamilyKey is the attribute Key conforming to the + // "host.cpu.family" semantic conventions. It represents the family or + // generation of the CPU. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', 'PA-RISC 1.1e' + HostCPUFamilyKey = attribute.Key("host.cpu.family") + + // HostCPUModelIDKey is the attribute Key conforming to the + // "host.cpu.model.id" semantic conventions. It represents the model + // identifier. It provides more granular information about the CPU, + // distinguishing it from other CPUs within the same family. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '6', '9000/778/B180L' + HostCPUModelIDKey = attribute.Key("host.cpu.model.id") + + // HostCPUModelNameKey is the attribute Key conforming to the + // "host.cpu.model.name" semantic conventions. It represents the model + // designation of the processor. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' + HostCPUModelNameKey = attribute.Key("host.cpu.model.name") + + // HostCPUSteppingKey is the attribute Key conforming to the + // "host.cpu.stepping" semantic conventions. It represents the stepping or + // core revisions. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1', 'r1p1' + HostCPUSteppingKey = attribute.Key("host.cpu.stepping") + + // HostCPUVendorIDKey is the attribute Key conforming to the + // "host.cpu.vendor.id" semantic conventions. It represents the processor + // manufacturer identifier. A maximum 12-character string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'GenuineIntel' + // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor + // ID string in EBX, EDX and ECX registers. Writing these to memory in this + // order results in a 12-character string. + HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") + + // HostIDKey is the attribute Key conforming to the "host.id" semantic + // conventions. It represents the unique host ID. For Cloud, this must be + // the instance_id assigned by the cloud provider. For non-containerized + // systems, this should be the `machine-id`. See the table below for the + // sources to use to determine the `machine-id` based on operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + + // HostImageIDKey is the attribute Key conforming to the "host.image.id" + // semantic conventions. It represents the VM image ID or host OS image ID. + // For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + + // HostImageNameKey is the attribute Key conforming to the + // "host.image.name" semantic conventions. It represents the name of the VM + // image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + + // HostImageVersionKey is the attribute Key conforming to the + // "host.image.version" semantic conventions. It represents the version + // string of the VM image or host OS as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") + + // HostIPKey is the attribute Key conforming to the "host.ip" semantic + // conventions. It represents the available IP addresses of the host, + // excluding loopback interfaces.
+ // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' + // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 + // addresses MUST be specified in the [RFC + // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. + HostIPKey = attribute.Key("host.ip") + + // HostMacKey is the attribute Key conforming to the "host.mac" semantic + // conventions. It represents the available MAC addresses of the host, + // excluding loopback interfaces. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' + // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal + // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): + // as hyphen-separated octets in uppercase hexadecimal form from most to + // least significant. + HostMacKey = attribute.Key("host.mac") + + // HostNameKey is the attribute Key conforming to the "host.name" semantic + // conventions. It represents the name of the host. On Unix systems, it may + // contain what the hostname command returns, or the fully qualified + // hostname, or another name specified by the user. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + + // HostTypeKey is the attribute Key conforming to the "host.type" semantic + // conventions. It represents the type of host. For Cloud, this must be the + // machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// HostCPUCacheL2Size returns an attribute KeyValue conforming to the +// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of +// level 2 memory cache available to the processor (in Bytes). +func HostCPUCacheL2Size(val int) attribute.KeyValue { + return HostCPUCacheL2SizeKey.Int(val) +} + +// HostCPUFamily returns an attribute KeyValue conforming to the +// "host.cpu.family" semantic conventions. It represents the family or +// generation of the CPU. +func HostCPUFamily(val string) attribute.KeyValue { + return HostCPUFamilyKey.String(val) +} + +// HostCPUModelID returns an attribute KeyValue conforming to the +// "host.cpu.model.id" semantic conventions. It represents the model +// identifier. It provides more granular information about the CPU, +// distinguishing it from other CPUs within the same family. +func HostCPUModelID(val string) attribute.KeyValue { + return HostCPUModelIDKey.String(val) +} + +// HostCPUModelName returns an attribute KeyValue conforming to the +// "host.cpu.model.name" semantic conventions. It represents the model +// designation of the processor. 
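+//
+// Illustrative usage (editor's sketch, not part of the upstream
+// generated file): host.* attributes usually describe the process
+// Resource rather than individual spans. The resource package is
+// assumed to be go.opentelemetry.io/otel/sdk/resource, and SchemaURL
+// the schema constant exported by this semconv package:
+//
+//	// Values are taken from the Examples listed above.
+//	res := resource.NewWithAttributes(SchemaURL,
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostCPUModelName("11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"),
+//	)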
+func HostCPUModelName(val string) attribute.KeyValue { + return HostCPUModelNameKey.String(val) +} + +// HostCPUStepping returns an attribute KeyValue conforming to the +// "host.cpu.stepping" semantic conventions. It represents the stepping or core +// revisions. +func HostCPUStepping(val string) attribute.KeyValue { + return HostCPUSteppingKey.String(val) +} + +// HostCPUVendorID returns an attribute KeyValue conforming to the +// "host.cpu.vendor.id" semantic conventions. It represents the processor +// manufacturer identifier. A maximum 12-character string. +func HostCPUVendorID(val string) attribute.KeyValue { + return HostCPUVendorIDKey.String(val) +} + +// HostID returns an attribute KeyValue conforming to the "host.id" semantic +// conventions. It represents the unique host ID. For Cloud, this must be the +// instance_id assigned by the cloud provider. For non-containerized systems, +// this should be the `machine-id`. See the table below for the sources to use +// to determine the `machine-id` based on operating system. +func HostID(val string) attribute.KeyValue { + return HostIDKey.String(val) +} + +// HostImageID returns an attribute KeyValue conforming to the +// "host.image.id" semantic conventions. It represents the VM image ID or host +// OS image ID. For Cloud, this value is from the provider. +func HostImageID(val string) attribute.KeyValue { + return HostImageIDKey.String(val) +} + +// HostImageName returns an attribute KeyValue conforming to the +// "host.image.name" semantic conventions. It represents the name of the VM +// image or OS install the host was instantiated from. +func HostImageName(val string) attribute.KeyValue { + return HostImageNameKey.String(val) +} + +// HostImageVersion returns an attribute KeyValue conforming to the +// "host.image.version" semantic conventions. It represents the version string +// of the VM image or host OS as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func HostImageVersion(val string) attribute.KeyValue { + return HostImageVersionKey.String(val) +} + +// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic +// conventions. It represents the available IP addresses of the host, excluding +// loopback interfaces. +func HostIP(val ...string) attribute.KeyValue { + return HostIPKey.StringSlice(val) +} + +// HostMac returns an attribute KeyValue conforming to the "host.mac" +// semantic conventions. It represents the available MAC addresses of the host, +// excluding loopback interfaces. +func HostMac(val ...string) attribute.KeyValue { + return HostMacKey.StringSlice(val) +} + +// HostName returns an attribute KeyValue conforming to the "host.name" +// semantic conventions. It represents the name of the host. On Unix systems, +// it may contain what the hostname command returns, or the fully qualified +// hostname, or another name specified by the user. +func HostName(val string) attribute.KeyValue { + return HostNameKey.String(val) +} + +// HostType returns an attribute KeyValue conforming to the "host.type" +// semantic conventions. It represents the type of host. For Cloud, this must +// be the machine type. +func HostType(val string) attribute.KeyValue { + return HostTypeKey.String(val) +} + +// Semantic convention attributes in the HTTP namespace. +const ( + // HTTPConnectionStateKey is the attribute Key conforming to the + // "http.connection.state" semantic conventions. It represents the state of + // the HTTP connection in the HTTP connection pool.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'active', 'idle' + HTTPConnectionStateKey = attribute.Key("http.connection.state") + + // HTTPRequestBodySizeKey is the attribute Key conforming to the + // "http.request.body.size" semantic conventions. It represents the size of + // the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") + + // HTTPRequestMethodKey is the attribute Key conforming to the + // "http.request.method" semantic conventions. It represents the HTTP + // request method. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + // Note: HTTP request method value SHOULD be "known" to the + // instrumentation. + // By default, this convention defines "known" methods as the ones listed + // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) + // and the PATCH method defined in + // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). + // + // If the HTTP request method is not known to instrumentation, it MUST set + // the `http.request.method` attribute to `_OTHER`. + // + // If the HTTP instrumentation could end up converting valid HTTP request + // methods to `_OTHER`, then it MUST provide a way to override + // the list of known HTTP methods. If this override is done via environment + // variable, then the environment variable MUST be named + // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated + // list of case-sensitive known HTTP methods + // (this list MUST be a full override of the default known method, it is + // not a list of known methods in addition to the defaults). + // + // HTTP method names are case-sensitive and `http.request.method` attribute + // value MUST match a known HTTP method name exactly. + // Instrumentations for specific web frameworks that consider HTTP methods + // to be case insensitive, SHOULD populate a canonical equivalent. + // Tracing instrumentations that do so, MUST also set + // `http.request.method_original` to the original value. + HTTPRequestMethodKey = attribute.Key("http.request.method") + + // HTTPRequestMethodOriginalKey is the attribute Key conforming to the + // "http.request.method_original" semantic conventions. It represents the + // original HTTP method sent by the client in the request line. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'GeT', 'ACL', 'foo' + HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") + + // HTTPRequestResendCountKey is the attribute Key conforming to the + // "http.request.resend_count" semantic conventions. It represents the + // ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets + // resent by the client, regardless of the cause of the resending + // (e.g.
redirection, authorization failure, 503 Server Unavailable, + // network issues, or any other). + HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") + + // HTTPRequestSizeKey is the attribute Key conforming to the + // "http.request.size" semantic conventions. It represents the total size + // of the request in bytes. This should be the total number of bytes sent + // over the wire, including the request line (HTTP/1.1), framing (HTTP/2 + // and HTTP/3), headers, and request body if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPRequestSizeKey = attribute.Key("http.request.size") + + // HTTPResponseBodySizeKey is the attribute Key conforming to the + // "http.response.body.size" semantic conventions. It represents the size + // of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3495 + HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") + + // HTTPResponseSizeKey is the attribute Key conforming to the + // "http.response.size" semantic conventions. It represents the total size + // of the response in bytes. This should be the total number of bytes sent + // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and + // HTTP/3), headers, and response body and trailers if any. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1437 + HTTPResponseSizeKey = attribute.Key("http.response.size") + + // HTTPResponseStatusCodeKey is the attribute Key conforming to the + // "http.response.status_code" semantic conventions. It represents the + // [HTTP response status + // code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 200 + HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route, that is, the path + // template in the format used by the respective server framework. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/docs/http/http-spans.md#http-server-definitions) if there is one. 
+ HTTPRouteKey = attribute.Key("http.route") +) + +var ( + // active state + HTTPConnectionStateActive = HTTPConnectionStateKey.String("active") + // idle state + HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle") +) + +var ( + // CONNECT method + HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") + // DELETE method + HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") + // GET method + HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") + // HEAD method + HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") + // OPTIONS method + HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") + // PATCH method + HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") + // POST method + HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") + // PUT method + HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") + // TRACE method + HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") + // Any HTTP method that the instrumentation has no prior knowledge of + HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") +) + +// HTTPRequestBodySize returns an attribute KeyValue conforming to the +// "http.request.body.size" semantic conventions. It represents the size of the +// request payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestBodySize(val int) attribute.KeyValue { + return HTTPRequestBodySizeKey.Int(val) +} + +// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the +// "http.request.method_original" semantic conventions. It represents the +// original HTTP method sent by the client in the request line. +func HTTPRequestMethodOriginal(val string) attribute.KeyValue { + return HTTPRequestMethodOriginalKey.String(val) +} + +// HTTPRequestResendCount returns an attribute KeyValue conforming to the +// "http.request.resend_count" semantic conventions. It represents the ordinal +// number of request resending attempt (for any reason, including redirects). +func HTTPRequestResendCount(val int) attribute.KeyValue { + return HTTPRequestResendCountKey.Int(val) +} + +// HTTPRequestSize returns an attribute KeyValue conforming to the +// "http.request.size" semantic conventions. It represents the total size of +// the request in bytes. This should be the total number of bytes sent over the +// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and request body if any. +func HTTPRequestSize(val int) attribute.KeyValue { + return HTTPRequestSizeKey.Int(val) +} + +// HTTPResponseBodySize returns an attribute KeyValue conforming to the +// "http.response.body.size" semantic conventions. It represents the size of +// the response payload body in bytes. This is the number of bytes transferred +// excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseBodySize(val int) attribute.KeyValue { + return HTTPResponseBodySizeKey.Int(val) +} + +// HTTPResponseSize returns an attribute KeyValue conforming to the +// "http.response.size" semantic conventions. 
It represents the total size of +// the response in bytes. This should be the total number of bytes sent over +// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3), +// headers, and response body and trailers if any. +func HTTPResponseSize(val int) attribute.KeyValue { + return HTTPResponseSizeKey.Int(val) +} + +// HTTPResponseStatusCode returns an attribute KeyValue conforming to the +// "http.response.status_code" semantic conventions. It represents the [HTTP +// response status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPResponseStatusCode(val int) attribute.KeyValue { + return HTTPResponseStatusCodeKey.Int(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route, that is, the path +// template in the format used by the respective server framework. +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Java Virtual machine related attributes. +const ( + // JvmBufferPoolNameKey is the attribute Key conforming to the + // "jvm.buffer.pool.name" semantic conventions. It represents the name of + // the buffer pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mapped', 'direct' + // Note: Pool names are generally obtained via + // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). + JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") + + // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action" + // semantic conventions. It represents the name of the garbage collector + // action. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'end of minor GC', 'end of major GC' + // Note: Garbage collector action is generally obtained via + // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()). + JvmGcActionKey = attribute.Key("jvm.gc.action") + + // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name" + // semantic conventions. It represents the name of the garbage collector. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Young Generation', 'G1 Old Generation' + // Note: Garbage collector name is generally obtained via + // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()). + JvmGcNameKey = attribute.Key("jvm.gc.name") + + // JvmMemoryPoolNameKey is the attribute Key conforming to the + // "jvm.memory.pool.name" semantic conventions. It represents the name of + // the memory pool. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' + // Note: Pool names are generally obtained via + // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). + JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") + + // JvmMemoryTypeKey is the attribute Key conforming to the + // "jvm.memory.type" semantic conventions. It represents the type of + // memory. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'heap', 'non_heap' + JvmMemoryTypeKey = attribute.Key("jvm.memory.type") + + // JvmThreadDaemonKey is the attribute Key conforming to the + // "jvm.thread.daemon" semantic conventions. It represents whether the + // thread is a daemon or not. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon") + + // JvmThreadStateKey is the attribute Key conforming to the + // "jvm.thread.state" semantic conventions. It represents the state of the + // thread. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'runnable', 'blocked' + JvmThreadStateKey = attribute.Key("jvm.thread.state") +) + +var ( + // Heap memory + JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") + // Non-heap memory + JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") +) + +var ( + // A thread that has not yet started is in this state + JvmThreadStateNew = JvmThreadStateKey.String("new") + // A thread executing in the Java virtual machine is in this state + JvmThreadStateRunnable = JvmThreadStateKey.String("runnable") + // A thread that is blocked waiting for a monitor lock is in this state + JvmThreadStateBlocked = JvmThreadStateKey.String("blocked") + // A thread that is waiting indefinitely for another thread to perform a particular action is in this state + JvmThreadStateWaiting = JvmThreadStateKey.String("waiting") + // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state + JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting") + // A thread that has exited is in this state + JvmThreadStateTerminated = JvmThreadStateKey.String("terminated") +) + +// JvmBufferPoolName returns an attribute KeyValue conforming to the +// "jvm.buffer.pool.name" semantic conventions. It represents the name of the +// buffer pool. +func JvmBufferPoolName(val string) attribute.KeyValue { + return JvmBufferPoolNameKey.String(val) +} + +// JvmGcAction returns an attribute KeyValue conforming to the +// "jvm.gc.action" semantic conventions. It represents the name of the garbage +// collector action. +func JvmGcAction(val string) attribute.KeyValue { + return JvmGcActionKey.String(val) +} + +// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name" +// semantic conventions. It represents the name of the garbage collector. +func JvmGcName(val string) attribute.KeyValue { + return JvmGcNameKey.String(val) +} + +// JvmMemoryPoolName returns an attribute KeyValue conforming to the +// "jvm.memory.pool.name" semantic conventions. It represents the name of the +// memory pool. +func JvmMemoryPoolName(val string) attribute.KeyValue { + return JvmMemoryPoolNameKey.String(val) +} + +// JvmThreadDaemon returns an attribute KeyValue conforming to the +// "jvm.thread.daemon" semantic conventions. It represents whether the +// thread is a daemon or not. +func JvmThreadDaemon(val bool) attribute.KeyValue { + return JvmThreadDaemonKey.Bool(val) +} + +// Kubernetes resource attributes. +const ( + // K8SClusterNameKey is the attribute Key conforming to the + // "k8s.cluster.name" semantic conventions. It represents the name of the + // cluster.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") + + // K8SClusterUIDKey is the attribute Key conforming to the + // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for + // the cluster, set to the UID of the `kube-system` namespace. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' + // Note: K8S doesn't have support for obtaining a cluster ID. If this is + // ever + // added, we will recommend collecting the `k8s.cluster.uid` through the + // official APIs. In the meantime, we are able to use the `uid` of the + // `kube-system` namespace as a proxy for cluster ID. Read on for the + // rationale. + // + // Every object created in a K8S cluster is assigned a distinct UID. The + // `kube-system` namespace is used by Kubernetes itself and will exist + // for the lifetime of the cluster. Using the `uid` of the `kube-system` + // namespace is a reasonable proxy for the K8S ClusterID as it will only + // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are + // UUIDs as standardized by + // [ISO/IEC 9834-8 and ITU-T + // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). + // Which states: + // + // > If generated according to one of the mechanisms defined in Rec. + // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be + // different from all other UUIDs generated before 3603 A.D., or is + // extremely likely to be different (depending on the mechanism chosen). + // + // Therefore, UIDs between clusters should be extremely unlikely to + // conflict. + K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") + + // K8SContainerNameKey is the attribute Key conforming to the + // "k8s.container.name" semantic conventions. It represents the name of the + // Container from Pod specification, must be unique within a Pod. Container + // runtime usually uses different globally unique name (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + + // K8SContainerRestartCountKey is the attribute Key conforming to the + // "k8s.container.restart_count" semantic conventions. It represents the + // number of times the container was restarted. This attribute can be used + // to identify a particular container (running or stopped) within a + // container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") + + // K8SContainerStatusLastTerminatedReasonKey is the attribute Key + // conforming to the "k8s.container.status.last_terminated_reason" semantic + // conventions. It represents the last terminated reason of the Container. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Evicted', 'Error' + K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason") + + // K8SCronJobNameKey is the attribute Key conforming to the + // "k8s.cronjob.name" semantic conventions. It represents the name of the + // CronJob. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") + + // K8SCronJobUIDKey is the attribute Key conforming to the + // "k8s.cronjob.uid" semantic conventions. It represents the UID of the + // CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") + + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDeploymentNameKey is the attribute Key conforming to the + // "k8s.deployment.name" semantic conventions. It represents the name of + // the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") + + // K8SDeploymentUIDKey is the attribute Key conforming to the + // "k8s.deployment.uid" semantic conventions. It represents the UID of the + // Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + + // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" + // semantic conventions. It represents the name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") + + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + + // K8SNamespaceNameKey is the attribute Key conforming to the + // "k8s.namespace.name" semantic conventions. It represents the name of the + // namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") + + // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" + // semantic conventions. It represents the name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + + // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" + // semantic conventions. It represents the UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") + + // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" + // semantic conventions. It represents the name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") + + // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" + // semantic conventions. It represents the UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + + // K8SReplicaSetNameKey is the attribute Key conforming to the + // "k8s.replicaset.name" semantic conventions. It represents the name of + // the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") + + // K8SReplicaSetUIDKey is the attribute Key conforming to the + // "k8s.replicaset.uid" semantic conventions. It represents the UID of the + // ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") + + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") +) + +// K8SClusterName returns an attribute KeyValue conforming to the +// "k8s.cluster.name" semantic conventions. It represents the name of the +// cluster. +func K8SClusterName(val string) attribute.KeyValue { + return K8SClusterNameKey.String(val) +} + +// K8SClusterUID returns an attribute KeyValue conforming to the +// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the +// cluster, set to the UID of the `kube-system` namespace. +func K8SClusterUID(val string) attribute.KeyValue { + return K8SClusterUIDKey.String(val) +} + +// K8SContainerName returns an attribute KeyValue conforming to the +// "k8s.container.name" semantic conventions. It represents the name of the +// Container from Pod specification, must be unique within a Pod. Container +// runtime usually uses different globally unique name (`container.name`). +func K8SContainerName(val string) attribute.KeyValue { + return K8SContainerNameKey.String(val) +} + +// K8SContainerRestartCount returns an attribute KeyValue conforming to the +// "k8s.container.restart_count" semantic conventions. It represents the number +// of times the container was restarted. This attribute can be used to identify +// a particular container (running or stopped) within a container spec. 
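+//
+// Illustrative usage (editor's sketch, not part of the upstream
+// generated file): k8s.* values are usually injected into the pod via
+// the Kubernetes Downward API; the POD_NAME environment variable below
+// is a hypothetical example, not a convention defined in this file:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SNamespaceName("default"),
+//		K8SPodName(os.Getenv("POD_NAME")), // assumed Downward API env var
+//		K8SContainerRestartCount(0),
+//	}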
+func K8SContainerRestartCount(val int) attribute.KeyValue { + return K8SContainerRestartCountKey.Int(val) +} + +// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue +// conforming to the "k8s.container.status.last_terminated_reason" semantic +// conventions. It represents the last terminated reason of the Container. +func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue { + return K8SContainerStatusLastTerminatedReasonKey.String(val) +} + +// K8SCronJobName returns an attribute KeyValue conforming to the +// "k8s.cronjob.name" semantic conventions. It represents the name of the +// CronJob. +func K8SCronJobName(val string) attribute.KeyValue { + return K8SCronJobNameKey.String(val) +} + +// K8SCronJobUID returns an attribute KeyValue conforming to the +// "k8s.cronjob.uid" semantic conventions. It represents the UID of the +// CronJob. +func K8SCronJobUID(val string) attribute.KeyValue { + return K8SCronJobUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" +// semantic conventions. It represents the name of the Job. +func K8SJobName(val string) attribute.KeyValue { + return K8SJobNameKey.String(val) +} + +// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" +// semantic conventions. It represents the UID of the Job. +func K8SJobUID(val string) attribute.KeyValue { + return K8SJobUIDKey.String(val) +} + +// K8SNamespaceName returns an attribute KeyValue conforming to the +// "k8s.namespace.name" semantic conventions. It represents the name of the +// namespace that the pod is running in. +func K8SNamespaceName(val string) attribute.KeyValue { + return K8SNamespaceNameKey.String(val) +} + +// K8SNodeName returns an attribute KeyValue conforming to the +// "k8s.node.name" semantic conventions. It represents the name of the Node. +func K8SNodeName(val string) attribute.KeyValue { + return K8SNodeNameKey.String(val) +} + +// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" +// semantic conventions. It represents the UID of the Node. +func K8SNodeUID(val string) attribute.KeyValue { + return K8SNodeUIDKey.String(val) +} + +// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" +// semantic conventions. It represents the name of the Pod. 
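+//
+// Editor's note: each typed helper in this file is shorthand for the
+// corresponding typed constructor on its attribute Key, so the three
+// expressions below all produce the same KeyValue:
+//
+//	K8SPodName("opentelemetry-pod-autoconf")
+//	K8SPodNameKey.String("opentelemetry-pod-autoconf")
+//	attribute.String("k8s.pod.name", "opentelemetry-pod-autoconf")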
+func K8SPodName(val string) attribute.KeyValue { + return K8SPodNameKey.String(val) +} + +// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" +// semantic conventions. It represents the UID of the Pod. +func K8SPodUID(val string) attribute.KeyValue { + return K8SPodUIDKey.String(val) +} + +// K8SReplicaSetName returns an attribute KeyValue conforming to the +// "k8s.replicaset.name" semantic conventions. It represents the name of the +// ReplicaSet. +func K8SReplicaSetName(val string) attribute.KeyValue { + return K8SReplicaSetNameKey.String(val) +} + +// K8SReplicaSetUID returns an attribute KeyValue conforming to the +// "k8s.replicaset.uid" semantic conventions. It represents the UID of the +// ReplicaSet. +func K8SReplicaSetUID(val string) attribute.KeyValue { + return K8SReplicaSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// Log attributes +const ( + // LogIostreamKey is the attribute Key conforming to the "log.iostream" + // semantic conventions. It represents the stream associated with the log. + // See below for a list of well-known values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + LogIostreamKey = attribute.Key("log.iostream") +) + +var ( + // Logs from stdout stream + LogIostreamStdout = LogIostreamKey.String("stdout") + // Events from stderr stream + LogIostreamStderr = LogIostreamKey.String("stderr") +) + +// Attributes for a file to which log was emitted. +const ( + // LogFileNameKey is the attribute Key conforming to the "log.file.name" + // semantic conventions. It represents the basename of the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'audit.log' + LogFileNameKey = attribute.Key("log.file.name") + + // LogFileNameResolvedKey is the attribute Key conforming to the + // "log.file.name_resolved" semantic conventions. It represents the + // basename of the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'uuid.log' + LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") + + // LogFilePathKey is the attribute Key conforming to the "log.file.path" + // semantic conventions. It represents the full path to the file. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/log/mysql/audit.log' + LogFilePathKey = attribute.Key("log.file.path") + + // LogFilePathResolvedKey is the attribute Key conforming to the + // "log.file.path_resolved" semantic conventions. It represents the full + // path to the file, with symlinks resolved. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/var/lib/docker/uuid.log' + LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") +) + +// LogFileName returns an attribute KeyValue conforming to the +// "log.file.name" semantic conventions. It represents the basename of the +// file. 
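+
+// Editorial usage sketch (illustrative, not part of the upstream generated
+// file): collecting the log-stream and log-file attributes above into a
+// plain attribute slice; all values are hypothetical.
+//
+//	import (
+//		"go.opentelemetry.io/otel/attribute"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+//	)
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.LogIostreamStdout,
+//		semconv.LogFileName("audit.log"),
+//		semconv.LogFilePath("/var/log/mysql/audit.log"),
+//	}
+//	_ = attrs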
+func LogFileName(val string) attribute.KeyValue { + return LogFileNameKey.String(val) +} + +// LogFileNameResolved returns an attribute KeyValue conforming to the +// "log.file.name_resolved" semantic conventions. It represents the basename of +// the file, with symlinks resolved. +func LogFileNameResolved(val string) attribute.KeyValue { + return LogFileNameResolvedKey.String(val) +} + +// LogFilePath returns an attribute KeyValue conforming to the +// "log.file.path" semantic conventions. It represents the full path to the +// file. +func LogFilePath(val string) attribute.KeyValue { + return LogFilePathKey.String(val) +} + +// LogFilePathResolved returns an attribute KeyValue conforming to the +// "log.file.path_resolved" semantic conventions. It represents the full path +// to the file, with symlinks resolved. +func LogFilePathResolved(val string) attribute.KeyValue { + return LogFilePathResolvedKey.String(val) +} + +// The generic attributes that may be used in any Log Record. +const ( + // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" + // semantic conventions. It represents a unique identifier for the Log + // Record. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' + // Note: If an id is provided, other log records with the same id will be + // considered duplicates and can be removed safely. This means that two + // distinguishable log records MUST have different values. + // The id MAY be a [Universally Unique Lexicographically Sortable + // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers + // (e.g. UUID) may be used as needed. + LogRecordUIDKey = attribute.Key("log.record.uid") +) + +// LogRecordUID returns an attribute KeyValue conforming to the +// "log.record.uid" semantic conventions. It represents a unique identifier for +// the Log Record. +func LogRecordUID(val string) attribute.KeyValue { + return LogRecordUIDKey.String(val) +} + +// Attributes describing telemetry around messaging systems and messaging +// activities. +const ( + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") + + // MessagingClientIDKey is the attribute Key conforming to the + // "messaging.client.id" semantic conventions. It represents a unique + // identifier for the client that consumes or produces a message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'client-5', 'myhost@8742@s8083jm' + MessagingClientIDKey = attribute.Key("messaging.client.id") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions.
It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") + + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker doesn't have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationPartitionIDKey is the attribute Key conforming to + // the "messaging.destination.partition.id" semantic conventions. It + // represents the identifier of the partition messages are sent to or + // received from, unique within the `messaging.destination.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1' + MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationPublishAnonymousKey is the attribute Key conforming + // to the "messaging.destination_publish.anonymous" semantic conventions. + // It represents a boolean that is true if the publish message destination + // is anonymous (could be unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") + + // MessagingDestinationPublishNameKey is the attribute Key conforming to + // the "messaging.destination_publish.name" semantic conventions. 
It + // represents the name of the original destination the message was + // published to + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyQueue', 'MyTopic' + // Note: The name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker doesn't have such notion, the original destination name + // SHOULD uniquely identify the broker. + MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") + + // MessagingMessageBodySizeKey is the attribute Key conforming to the + // "messaging.message.body.size" semantic conventions. It represents the + // size of the message body in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1439 + // Note: This can refer to both the compressed or uncompressed body size. + // If both sizes are known, the uncompressed + // body size should be used. + MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the conversation ID identifying the conversation to which the message + // belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the + // "messaging.message.envelope.size" semantic conventions. It represents + // the size of the message body and metadata in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2738 + // Note: This can refer to both the compressed or uncompressed size. If + // both sizes are known, the uncompressed + // size should be used. + MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") + + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingOperationNameKey is the attribute Key conforming to the + // "messaging.operation.name" semantic conventions. It represents the + // system-specific name of the messaging operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack', 'nack', 'send' + MessagingOperationNameKey = attribute.Key("messaging.operation.name") + + // MessagingOperationTypeKey is the attribute Key conforming to the + // "messaging.operation.type" semantic conventions. It represents a string + // identifying the type of the messaging operation. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationTypeKey = attribute.Key("messaging.operation.type") + + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents the messaging + // system as identified by the client instrumentation. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The actual messaging system may differ from the one known by the + // client. For example, when using Kafka client libraries to communicate + // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on + // the instrumentation's best knowledge. + MessagingSystemKey = attribute.Key("messaging.system") +) + +var ( + // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created + MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish") + // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios + MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create") + // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages + MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive") + // One or more messages are delivered to or processed by a consumer + MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process") + // One or more messages are settled + MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle") +) + +var ( + // Apache ActiveMQ + MessagingSystemActivemq = MessagingSystemKey.String("activemq") + // Amazon Simple Queue Service (SQS) + MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") + // Azure Event Grid + MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid") + // Azure Event Hubs + MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs") + // Azure Service Bus + MessagingSystemServicebus = MessagingSystemKey.String("servicebus") + // Google Cloud Pub/Sub + MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") + // Java Message Service + MessagingSystemJms = MessagingSystemKey.String("jms") + // Apache Kafka + MessagingSystemKafka = MessagingSystemKey.String("kafka") + // RabbitMQ + MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") + // Apache RocketMQ + MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") +) + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// MessagingClientID returns an attribute KeyValue conforming to the +// "messaging.client.id" semantic conventions. It represents a unique +// identifier for the client that consumes or produces a message. +func MessagingClientID(val string) attribute.KeyValue { + return MessagingClientIDKey.String(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). 
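+
+// Editorial usage sketch (illustrative, not part of the upstream generated
+// file): a producer-side span annotated with the messaging attributes and
+// enum values defined above. The tracer name, destination, and body size are
+// hypothetical; the semconv version segment of the import path is an
+// assumption.
+//
+//	import (
+//		"context"
+//		"go.opentelemetry.io/otel"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+//		"go.opentelemetry.io/otel/trace"
+//	)
+//
+//	_, span := otel.Tracer("example").Start(context.Background(), "publish MyQueue",
+//		trace.WithSpanKind(trace.SpanKindProducer),
+//		trace.WithAttributes(
+//			semconv.MessagingSystemKafka,
+//			semconv.MessagingOperationTypePublish,
+//			semconv.MessagingDestinationName("MyQueue"),
+//			semconv.MessagingMessageBodySize(1439),
+//		))
+//	defer span.End()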
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationPartitionID returns an attribute KeyValue conforming +// to the "messaging.destination.partition.id" semantic conventions. It +// represents the identifier of the partition messages are sent to or received +// from, unique within the `messaging.destination.name`. +func MessagingDestinationPartitionID(val string) attribute.KeyValue { + return MessagingDestinationPartitionIDKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationPublishAnonymous returns an attribute KeyValue +// conforming to the "messaging.destination_publish.anonymous" semantic +// conventions. It represents a boolean that is true if the publish message +// destination is anonymous (could be unnamed or have auto-generated name). +func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationPublishAnonymousKey.Bool(val) +} + +// MessagingDestinationPublishName returns an attribute KeyValue conforming +// to the "messaging.destination_publish.name" semantic conventions. It +// represents the name of the original destination the message was published to +func MessagingDestinationPublishName(val string) attribute.KeyValue { + return MessagingDestinationPublishNameKey.String(val) +} + +// MessagingMessageBodySize returns an attribute KeyValue conforming to the +// "messaging.message.body.size" semantic conventions. It represents the size +// of the message body in bytes. +func MessagingMessageBodySize(val int) attribute.KeyValue { + return MessagingMessageBodySizeKey.Int(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the conversation ID identifying the conversation to which the +// message belongs, represented as a string. Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to +// the "messaging.message.envelope.size" semantic conventions. It represents +// the size of the message body and metadata in bytes. 
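+
+// Complementary consumer-side sketch (editorial, same caveats and imports as
+// the producer sketch above): the same destination observed from a process
+// operation, adding the Kafka consumer-group attribute defined further down
+// in this file. Note that MessagingOperationTypeDeliver emits the value
+// "process", as declared in the enum block above.
+//
+//	_, span := otel.Tracer("example").Start(context.Background(), "process MyQueue",
+//		trace.WithSpanKind(trace.SpanKindConsumer),
+//		trace.WithAttributes(
+//			semconv.MessagingSystemKafka,
+//			semconv.MessagingOperationTypeDeliver,
+//			semconv.MessagingDestinationName("MyQueue"),
+//			semconv.MessagingKafkaConsumerGroup("my-group"),
+//		))
+//	defer span.End()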
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { + return MessagingMessageEnvelopeSizeKey.Int(val) +} + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingOperationName returns an attribute KeyValue conforming to the +// "messaging.operation.name" semantic conventions. It represents the +// system-specific name of the messaging operation. +func MessagingOperationName(val string) attribute.KeyValue { + return MessagingOperationNameKey.String(val) +} + +// This group describes attributes specific to Apache Kafka. +const ( + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka, which are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to +// the "messaging.kafka.consumer.group" semantic conventions. It represents the +// name of the Kafka Consumer Group that is handling the message. Only applies +// to consumers, not producers. +func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { + return MessagingKafkaConsumerGroupKey.String(val) +} + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka, which are used for grouping alike messages to ensure they're +// processed on the same partition.
They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. +func MessagingKafkaMessageKey(val string) attribute.KeyValue { + return MessagingKafkaMessageKeyKey.String(val) +} + +// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to +// the "messaging.kafka.message.offset" semantic conventions. It represents the +// offset of a record in the corresponding Kafka partition. +func MessagingKafkaMessageOffset(val int) attribute.KeyValue { + return MessagingKafkaMessageOffsetKey.Int(val) +} + +// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming +// to the "messaging.kafka.message.tombstone" semantic conventions. It +// represents a boolean that is true if the message is a tombstone. +func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { + return MessagingKafkaMessageTombstoneKey.Bool(val) +} + +// This group describes attributes specific to RabbitMQ. +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") + + // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming + // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. + // It represents the rabbitMQ message delivery tag + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 123 + MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic +// conventions. It represents the rabbitMQ message delivery tag +func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue { + return MessagingRabbitmqMessageDeliveryTagKey.Int(val) +} + +// This group describes attributes specific to RocketMQ. +const ( + // MessagingRocketmqClientGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.client_group" semantic conventions. It represents + // the name of the RocketMQ producer/consumer group that is handling the + // message. The client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + + // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to + // the "messaging.rocketmq.consumption_model" semantic conventions. It + // represents the model of message consumption. This only applies to + // consumer spans. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") + + // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delay_time_level" semantic + // conventions. It represents the delay time level for delay message, which + // determines the message delay time. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + + // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key + // conforming to the "messaging.rocketmq.message.delivery_timestamp" + // semantic conventions. It represents the timestamp in milliseconds that + // the delay message is expected to be delivered to consumer. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + + // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the + // "messaging.rocketmq.message.group" semantic conventions. It represents + // the message group, which is essential for FIFO messages. Messages that belong to the same + // message group are always processed one by one within the same consumer + // group. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + + // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the + // "messaging.rocketmq.message.keys" semantic conventions. It represents + // the key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + + // MessagingRocketmqMessageTagKey is the attribute Key conforming to the + // "messaging.rocketmq.message.tag" semantic conventions. It represents the + // secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + + // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the + // "messaging.rocketmq.message.type" semantic conventions. It represents + // the type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + + // MessagingRocketmqNamespaceKey is the attribute Key conforming to the + // "messaging.rocketmq.namespace" semantic conventions. It represents the + // namespace of RocketMQ resources, resources in different namespaces are + // individual.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.client_group" semantic conventions. It represents +// the name of the RocketMQ producer/consumer group that is handling the +// message. The client type is identified by the SpanKind. +func MessagingRocketmqClientGroup(val string) attribute.KeyValue { + return MessagingRocketmqClientGroupKey.String(val) +} + +// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delay_time_level" semantic +// conventions. It represents the delay time level for delay message, which +// determines the message delay time. +func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { + return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) +} + +// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue +// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic +// conventions. It represents the timestamp in milliseconds that the delay +// message is expected to be delivered to consumer. +func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { + return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) +} + +// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.group" semantic conventions. It represents +// the message group, which is essential for FIFO messages. Messages that belong to the same +// message group are always processed one by one within the same consumer +// group. +func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { + return MessagingRocketmqMessageGroupKey.String(val) +} + +// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.keys" semantic conventions. It represents +// the key(s) of message, another way to mark message besides message id. +func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { + return MessagingRocketmqMessageKeysKey.StringSlice(val) +} + +// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to +// the "messaging.rocketmq.message.tag" semantic conventions. It represents the +// secondary classifier of message besides topic. +func MessagingRocketmqMessageTag(val string) attribute.KeyValue { + return MessagingRocketmqMessageTagKey.String(val) +} + +// MessagingRocketmqNamespace returns an attribute KeyValue conforming to +// the "messaging.rocketmq.namespace" semantic conventions.
It represents the +// namespace of RocketMQ resources, resources in different namespaces are +// individual. +func MessagingRocketmqNamespace(val string) attribute.KeyValue { + return MessagingRocketmqNamespaceKey.String(val) +} + +// This group describes attributes specific to GCP Pub/Sub. +const ( + // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. + // It represents the ack deadline in seconds set for the modify ack + // deadline request. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 10 + MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline") + + // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the + // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It + // represents the ack id for a given message. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ack_id' + MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id") + + // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key + // conforming to the "messaging.gcp_pubsub.message.delivery_attempt" + // semantic conventions. It represents the delivery attempt for a given + // message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt") + + // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming + // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. + // It represents the ordering key for a given message. If the attribute is + // not present, the message does not have an ordering key. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ordering_key' + MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") +) + +// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic +// conventions. It represents the ack deadline in seconds set for the modify +// ack deadline request. +func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageAckDeadlineKey.Int(val) +} + +// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming +// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It +// represents the ack id for a given message. +func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageAckIDKey.String(val) +} + +// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic +// conventions. It represents the delivery attempt for a given message. +func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue { + return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val) +} + +// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue +// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic +// conventions. It represents the ordering key for a given message. If the +// attribute is not present, the message does not have an ordering key. 
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { + return MessagingGCPPubsubMessageOrderingKeyKey.String(val) +} + +// This group describes attributes specific to Azure Service Bus. +const ( + // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key + // conforming to the "messaging.servicebus.destination.subscription_name" + // semantic conventions. It represents the name of the subscription in the + // topic messages are received from. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'mySubscription' + MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name") + + // MessagingServicebusDispositionStatusKey is the attribute Key conforming + // to the "messaging.servicebus.disposition_status" semantic conventions. + // It represents the [settlement + // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status") + + // MessagingServicebusMessageDeliveryCountKey is the attribute Key + // conforming to the "messaging.servicebus.message.delivery_count" semantic + // conventions. It represents the number of deliveries that have been + // attempted for this message. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 2 + MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count") + + // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key + // conforming to the "messaging.servicebus.message.enqueued_time" semantic + // conventions. It represents the UTC epoch seconds at which the message + // has been accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time") +) + +var ( + // Message is completed + MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete") + // Message is abandoned + MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon") + // Message is sent to dead letter queue + MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter") + // Message is deferred + MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer") +) + +// MessagingServicebusDestinationSubscriptionName returns an attribute +// KeyValue conforming to the +// "messaging.servicebus.destination.subscription_name" semantic conventions. +// It represents the name of the subscription in the topic messages are +// received from. +func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue { + return MessagingServicebusDestinationSubscriptionNameKey.String(val) +} + +// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.delivery_count" semantic +// conventions. It represents the number of deliveries that have been attempted +// for this message.
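+
+// Editorial usage sketch (illustrative, not part of the upstream generated
+// file): the Azure Service Bus attributes and disposition-status enum above
+// applied to an existing span; values are hypothetical.
+//
+//	span.SetAttributes(
+//		semconv.MessagingSystemServicebus,
+//		semconv.MessagingServicebusDestinationSubscriptionName("mySubscription"),
+//		semconv.MessagingServicebusDispositionStatusComplete,
+//		semconv.MessagingServicebusMessageDeliveryCount(2),
+//	)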
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue { + return MessagingServicebusMessageDeliveryCountKey.Int(val) +} + +// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.servicebus.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingServicebusMessageEnqueuedTimeKey.Int(val) +} + +// This group describes attributes specific to Azure Event Hubs. +const ( + // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to + // the "messaging.eventhubs.consumer.group" semantic conventions. It + // represents the name of the consumer group the event consumer is + // associated with. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'indexer' + MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group") + + // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming + // to the "messaging.eventhubs.message.enqueued_time" semantic conventions. + // It represents the UTC epoch seconds at which the message has been + // accepted and stored in the entity. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1701393730 + MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time") +) + +// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming +// to the "messaging.eventhubs.consumer.group" semantic conventions. It +// represents the name of the consumer group the event consumer is associated +// with. +func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue { + return MessagingEventhubsConsumerGroupKey.String(val) +} + +// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue +// conforming to the "messaging.eventhubs.message.enqueued_time" semantic +// conventions. It represents the UTC epoch seconds at which the message has +// been accepted and stored in the entity. +func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue { + return MessagingEventhubsMessageEnqueuedTimeKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetworkCarrierIccKey is the attribute Key conforming to the + // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 + // alpha-2 2-character country code associated with the mobile carrier + // network. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'DE' + NetworkCarrierIccKey = attribute.Key("network.carrier.icc") + + // NetworkCarrierMccKey is the attribute Key conforming to the + // "network.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '310' + NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") + + // NetworkCarrierMncKey is the attribute Key conforming to the + // "network.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '001' + NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") + + // NetworkCarrierNameKey is the attribute Key conforming to the + // "network.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'sprint' + NetworkCarrierNameKey = attribute.Key("network.carrier.name") + + // NetworkConnectionSubtypeKey is the attribute Key conforming to the + // "network.connection.subtype" semantic conventions. It describes more + // details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'LTE' + NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") + + // NetworkConnectionTypeKey is the attribute Key conforming to the + // "network.connection.type" semantic conventions. It represents the + // internet connection type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'wifi' + NetworkConnectionTypeKey = attribute.Key("network.connection.type") + + // NetworkIoDirectionKey is the attribute Key conforming to the + // "network.io.direction" semantic conventions. It represents the network + // IO operation direction. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'transmit' + NetworkIoDirectionKey = attribute.Key("network.io.direction") + + // NetworkLocalAddressKey is the attribute Key conforming to the + // "network.local.address" semantic conventions. It represents the local + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkLocalAddressKey = attribute.Key("network.local.address") + + // NetworkLocalPortKey is the attribute Key conforming to the + // "network.local.port" semantic conventions. It represents the local port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkLocalPortKey = attribute.Key("network.local.port") + + // NetworkPeerAddressKey is the attribute Key conforming to the + // "network.peer.address" semantic conventions. It represents the peer + // address of the network connection - IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10.1.2.80', '/tmp/my.sock' + NetworkPeerAddressKey = attribute.Key("network.peer.address") + + // NetworkPeerPortKey is the attribute Key conforming to the + // "network.peer.port" semantic conventions. It represents the peer port + // number of the network connection. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 65123 + NetworkPeerPortKey = attribute.Key("network.peer.port") + + // NetworkProtocolNameKey is the attribute Key conforming to the + // "network.protocol.name" semantic conventions. It represents the [OSI + // application layer](https://osi-model.com/application-layer/) or non-OSI + // equivalent.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + // Note: The value SHOULD be normalized to lowercase. + NetworkProtocolNameKey = attribute.Key("network.protocol.name") + + // NetworkProtocolVersionKey is the attribute Key conforming to the + // "network.protocol.version" semantic conventions. It represents the + // actual version of the protocol used for network communication. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.1', '2' + // Note: If protocol version is subject to negotiation (for example using + // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute + // SHOULD be set to the negotiated version. If the actual protocol version + // is not known, this attribute SHOULD NOT be set. + NetworkProtocolVersionKey = attribute.Key("network.protocol.version") + + // NetworkTransportKey is the attribute Key conforming to the + // "network.transport" semantic conventions. It represents the [OSI + // transport layer](https://osi-model.com/transport-layer/) or + // [inter-process communication + // method](https://wikipedia.org/wiki/Inter-process_communication). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tcp', 'udp' + // Note: The value SHOULD be normalized to lowercase. + // + // Consider always setting the transport when setting a port number, since + // a port number is ambiguous without knowing the transport. For example + // different processes could be listening on TCP port 12345 and UDP port + // 12345. + NetworkTransportKey = attribute.Key("network.transport") + + // NetworkTypeKey is the attribute Key conforming to the "network.type" + // semantic conventions. It represents the [OSI network + // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ipv4', 'ipv6' + // Note: The value SHOULD be normalized to lowercase. + NetworkTypeKey = attribute.Key("network.type") +) + +var ( + // GPRS + NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") + // EDGE + NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") + // UMTS + NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") + // CDMA + NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") + // HSPA + NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") + // IDEN + NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") + // LTE + NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") + // EHRPD + NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") + // GSM + NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") +) + +var ( + // wifi + NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") + // wired + NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") + // cell + NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") + // unavailable + NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") + // unknown + NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") +) + +var ( + // transmit + NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") + // receive + NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") +) + +var ( + // TCP + NetworkTransportTCP = NetworkTransportKey.String("tcp") + // UDP + NetworkTransportUDP = NetworkTransportKey.String("udp") + // Named or anonymous pipe + NetworkTransportPipe = NetworkTransportKey.String("pipe") + // Unix domain socket + NetworkTransportUnix = NetworkTransportKey.String("unix") +) + +var ( + // IPv4 + NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") + // IPv6 + NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") +) + +// NetworkCarrierIcc returns an attribute KeyValue conforming to the +// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetworkCarrierIcc(val string) attribute.KeyValue { + return NetworkCarrierIccKey.String(val) +} + +// NetworkCarrierMcc returns an attribute KeyValue conforming to the +// "network.carrier.mcc" semantic conventions. It represents the mobile carrier +// country code. +func NetworkCarrierMcc(val string) attribute.KeyValue { + return NetworkCarrierMccKey.String(val) +} + +// NetworkCarrierMnc returns an attribute KeyValue conforming to the +// "network.carrier.mnc" semantic conventions. It represents the mobile carrier +// network code. +func NetworkCarrierMnc(val string) attribute.KeyValue { + return NetworkCarrierMncKey.String(val) +} + +// NetworkCarrierName returns an attribute KeyValue conforming to the +// "network.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetworkCarrierName(val string) attribute.KeyValue { + return NetworkCarrierNameKey.String(val) +} + +// NetworkLocalAddress returns an attribute KeyValue conforming to the +// "network.local.address" semantic conventions. It represents the local +// address of the network connection - IP address or Unix domain socket name. 
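+
+// Editorial usage sketch (illustrative, not part of the upstream generated
+// file): describing a TCP/IPv4 connection with the network attributes and
+// enum values above; addresses, ports, and protocol details are
+// hypothetical.
+//
+//	span.SetAttributes(
+//		semconv.NetworkTransportTCP,
+//		semconv.NetworkTypeIpv4,
+//		semconv.NetworkPeerAddress("10.1.2.80"),
+//		semconv.NetworkPeerPort(65123),
+//		semconv.NetworkProtocolName("http"),
+//		semconv.NetworkProtocolVersion("1.1"),
+//	)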
+func NetworkLocalAddress(val string) attribute.KeyValue { + return NetworkLocalAddressKey.String(val) +} + +// NetworkLocalPort returns an attribute KeyValue conforming to the +// "network.local.port" semantic conventions. It represents the local port +// number of the network connection. +func NetworkLocalPort(val int) attribute.KeyValue { + return NetworkLocalPortKey.Int(val) +} + +// NetworkPeerAddress returns an attribute KeyValue conforming to the +// "network.peer.address" semantic conventions. It represents the peer address +// of the network connection - IP address or Unix domain socket name. +func NetworkPeerAddress(val string) attribute.KeyValue { + return NetworkPeerAddressKey.String(val) +} + +// NetworkPeerPort returns an attribute KeyValue conforming to the +// "network.peer.port" semantic conventions. It represents the peer port number +// of the network connection. +func NetworkPeerPort(val int) attribute.KeyValue { + return NetworkPeerPortKey.Int(val) +} + +// NetworkProtocolName returns an attribute KeyValue conforming to the +// "network.protocol.name" semantic conventions. It represents the [OSI +// application layer](https://osi-model.com/application-layer/) or non-OSI +// equivalent. +func NetworkProtocolName(val string) attribute.KeyValue { + return NetworkProtocolNameKey.String(val) +} + +// NetworkProtocolVersion returns an attribute KeyValue conforming to the +// "network.protocol.version" semantic conventions. It represents the actual +// version of the protocol used for network communication. +func NetworkProtocolVersion(val string) attribute.KeyValue { + return NetworkProtocolVersionKey.String(val) +} + +// An OCI image manifest. +const ( + // OciManifestDigestKey is the attribute Key conforming to the + // "oci.manifest.digest" semantic conventions. It represents the digest of + // the OCI image manifest. For container images specifically, it is the digest + // by which the container image is known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: + // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' + // Note: Follows [OCI Image Manifest + // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), + // and specifically the [Digest + // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). + // An example can be found in [Example Image + // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). + OciManifestDigestKey = attribute.Key("oci.manifest.digest") +) + +// OciManifestDigest returns an attribute KeyValue conforming to the +// "oci.manifest.digest" semantic conventions. It represents the digest of the +// OCI image manifest. For container images specifically, it is the digest by which +// the container image is known. +func OciManifestDigest(val string) attribute.KeyValue { + return OciManifestDigestKey.String(val) +} + +// Attributes used by the OpenTracing Shim layer. +const ( + // OpentracingRefTypeKey is the attribute Key conforming to the + // "opentracing.ref_type" semantic conventions. It represents the + // parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span doesn't depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// The operating system (OS) on which the process represented by this resource +// is running. +const ( + // OSBuildIDKey is the attribute Key conforming to the "os.build_id" + // semantic conventions. It represents the unique identifier for a + // particular build or compilation of the operating system. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' + OSBuildIDKey = attribute.Key("os.build_id") + + // OSDescriptionKey is the attribute Key conforming to the "os.description" + // semantic conventions. It represents the human readable (not intended to + // be parsed) OS version information, like e.g. reported by `ver` or + // `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 + // LTS' + OSDescriptionKey = attribute.Key("os.description") + + // OSNameKey is the attribute Key conforming to the "os.name" semantic + // conventions. It represents the human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + + // OSTypeKey is the attribute Key conforming to the "os.type" semantic + // conventions. It represents the operating system type. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + OSTypeKey = attribute.Key("os.type") + + // OSVersionKey is the attribute Key conforming to the "os.version" + // semantic conventions. It represents the version string of the operating + // system as defined in [Version + // Attributes](/docs/resource/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" +// semantic conventions. It represents the unique identifier for a particular +// build or compilation of the operating system. +func OSBuildID(val string) attribute.KeyValue { + return OSBuildIDKey.String(val) +} + +// OSDescription returns an attribute KeyValue conforming to the +// "os.description" semantic conventions. 
It represents the human readable (not +// intended to be parsed) OS version information, like e.g. reported by `ver` +// or `lsb_release -a` commands. +func OSDescription(val string) attribute.KeyValue { + return OSDescriptionKey.String(val) +} + +// OSName returns an attribute KeyValue conforming to the "os.name" semantic +// conventions. It represents the human readable operating system name. +func OSName(val string) attribute.KeyValue { + return OSNameKey.String(val) +} + +// OSVersion returns an attribute KeyValue conforming to the "os.version" +// semantic conventions. It represents the version string of the operating +// system as defined in [Version +// Attributes](/docs/resource/README.md#version-attributes). +func OSVersion(val string) attribute.KeyValue { + return OSVersionKey.String(val) +} + +// Attributes reserved for OpenTelemetry +const ( + // OTelStatusCodeKey is the attribute Key conforming to the + // "otel.status_code" semantic conventions. It represents the name of the + // code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OTelStatusCodeKey = attribute.Key("otel.status_code") + + // OTelStatusDescriptionKey is the attribute Key conforming to the + // "otel.status_description" semantic conventions. It represents the + // description of the Status if it has a value, otherwise not set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OTelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OTelStatusCodeOk = OTelStatusCodeKey.String("OK") + // The operation contains an error + OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") +) + +// OTelStatusDescription returns an attribute KeyValue conforming to the +// "otel.status_description" semantic conventions. It represents the +// description of the Status if it has a value, otherwise not set. +func OTelStatusDescription(val string) attribute.KeyValue { + return OTelStatusDescriptionKey.String(val) +} + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's +// concepts. +const ( + // OTelScopeNameKey is the attribute Key conforming to the + // "otel.scope.name" semantic conventions. It represents the name of the + // instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OTelScopeNameKey = attribute.Key("otel.scope.name") + + // OTelScopeVersionKey is the attribute Key conforming to the + // "otel.scope.version" semantic conventions. It represents the version of + // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OTelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// OTelScopeName returns an attribute KeyValue conforming to the +// "otel.scope.name" semantic conventions. It represents the name of the +// instrumentation scope - (`InstrumentationScope.Name` in OTLP). +func OTelScopeName(val string) attribute.KeyValue { + return OTelScopeNameKey.String(val) +} + +// OTelScopeVersion returns an attribute KeyValue conforming to the +// "otel.scope.version" semantic conventions. 
It represents the version of the +// instrumentation scope - (`InstrumentationScope.Version` in OTLP). +func OTelScopeVersion(val string) attribute.KeyValue { + return OTelScopeVersionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](/docs/resource/README.md#service) of the remote + // service. SHOULD be equal to the actual `service.name` resource attribute + // of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// PeerService returns an attribute KeyValue conforming to the +// "peer.service" semantic conventions. It represents the +// [`service.name`](/docs/resource/README.md#service) of the remote service. +// SHOULD be equal to the actual `service.name` resource attribute of the +// remote service if any. +func PeerService(val string) attribute.KeyValue { + return PeerServiceKey.String(val) +} + +// An operating system process. +const ( + // ProcessCommandKey is the attribute Key conforming to the + // "process.command" semantic conventions. It represents the command used + // to launch the process (i.e. the command name). On Linux based systems, + // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can + // be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + + // ProcessCommandArgsKey is the attribute Key conforming to the + // "process.command_args" semantic conventions. It represents all the + // command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited + // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, + // this would be the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'cmd/otelcol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + + // ProcessCommandLineKey is the attribute Key conforming to the + // "process.command_line" semantic conventions. It represents the full + // command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. + // Do not set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + + // ProcessContextSwitchTypeKey is the attribute Key conforming to the + // "process.context_switch_type" semantic conventions. It specifies whether + // the context switches for this data point were + // voluntary or involuntary.
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type") + + // ProcessCreationTimeKey is the attribute Key conforming to the + // "process.creation.time" semantic conventions. It represents the date and + // time the process was created, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:25:34.853Z' + ProcessCreationTimeKey = attribute.Key("process.creation.time") + + // ProcessExecutableNameKey is the attribute Key conforming to the + // "process.executable.name" semantic conventions. It represents the name + // of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name + // of `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + + // ProcessExecutablePathKey is the attribute Key conforming to the + // "process.executable.path" semantic conventions. It represents the full + // path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + + // ProcessExitCodeKey is the attribute Key conforming to the + // "process.exit.code" semantic conventions. It represents the exit code of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 127 + ProcessExitCodeKey = attribute.Key("process.exit.code") + + // ProcessExitTimeKey is the attribute Key conforming to the + // "process.exit.time" semantic conventions. It represents the date and + // time the process exited, in ISO 8601 format. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2023-11-21T09:26:12.315Z' + ProcessExitTimeKey = attribute.Key("process.exit.time") + + // ProcessGroupLeaderPIDKey is the attribute Key conforming to the + // "process.group_leader.pid" semantic conventions. It represents the PID + // of the process's group leader. This is also the process group ID (PGID) + // of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 23 + ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid") + + // ProcessInteractiveKey is the attribute Key conforming to the + // "process.interactive" semantic conventions. It represents whether + // the process is connected to an interactive shell. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: experimental + ProcessInteractiveKey = attribute.Key("process.interactive") + + // ProcessOwnerKey is the attribute Key conforming to the "process.owner" + // semantic conventions. It represents the username of the user that owns + // the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") + + // ProcessPagingFaultTypeKey is the attribute Key conforming to the + // "process.paging.fault_type" semantic conventions.
It represents the type + // of page fault for this data point. Type `major` is for major/hard page + // faults, and `minor` is for minor/soft page faults. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type") + + // ProcessParentPIDKey is the attribute Key conforming to the + // "process.parent_pid" semantic conventions. It represents the parent + // Process identifier (PPID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + + // ProcessPIDKey is the attribute Key conforming to the "process.pid" + // semantic conventions. It represents the process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + + // ProcessRealUserIDKey is the attribute Key conforming to the + // "process.real_user.id" semantic conventions. It represents the real user + // ID (RUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1000 + ProcessRealUserIDKey = attribute.Key("process.real_user.id") + + // ProcessRealUserNameKey is the attribute Key conforming to the + // "process.real_user.name" semantic conventions. It represents the + // username of the real user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessRealUserNameKey = attribute.Key("process.real_user.name") + + // ProcessRuntimeDescriptionKey is the attribute Key conforming to the + // "process.runtime.description" semantic conventions. It represents an + // additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") + + // ProcessRuntimeNameKey is the attribute Key conforming to the + // "process.runtime.name" semantic conventions. It represents the name of + // the runtime of this process. For compiled native binaries, this SHOULD + // be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + + // ProcessRuntimeVersionKey is the attribute Key conforming to the + // "process.runtime.version" semantic conventions. It represents the + // version of the runtime of this process, as returned by the runtime + // without modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + + // ProcessSavedUserIDKey is the attribute Key conforming to the + // "process.saved_user.id" semantic conventions. It represents the saved + // user ID (SUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1002 + ProcessSavedUserIDKey = attribute.Key("process.saved_user.id") + + // ProcessSavedUserNameKey is the attribute Key conforming to the + // "process.saved_user.name" semantic conventions. It represents the + // username of the saved user. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'operator' + ProcessSavedUserNameKey = attribute.Key("process.saved_user.name") + + // ProcessSessionLeaderPIDKey is the attribute Key conforming to the + // "process.session_leader.pid" semantic conventions. It represents the PID + // of the process's session leader. This is also the session ID (SID) of + // the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 14 + ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid") + + // ProcessUserIDKey is the attribute Key conforming to the + // "process.user.id" semantic conventions. It represents the effective user + // ID (EUID) of the process. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1001 + ProcessUserIDKey = attribute.Key("process.user.id") + + // ProcessUserNameKey is the attribute Key conforming to the + // "process.user.name" semantic conventions. It represents the username of + // the effective user of the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'root' + ProcessUserNameKey = attribute.Key("process.user.name") + + // ProcessVpidKey is the attribute Key conforming to the "process.vpid" + // semantic conventions. It represents the virtual process identifier. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 12 + // Note: The process ID within a PID namespace. This is not necessarily + // unique across all processes on the host but it is unique within the + // process namespace that the process exists within. + ProcessVpidKey = attribute.Key("process.vpid") +) + +var ( + // voluntary + ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary") + // involuntary + ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary") +) + +var ( + // major + ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major") + // minor + ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor") +) + +// ProcessCommand returns an attribute KeyValue conforming to the +// "process.command" semantic conventions. It represents the command used to +// launch the process (i.e. the command name). On Linux based systems, can be +// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to +// the first parameter extracted from `GetCommandLineW`. +func ProcessCommand(val string) attribute.KeyValue { + return ProcessCommandKey.String(val) +} + +// ProcessCommandArgs returns an attribute KeyValue conforming to the +// "process.command_args" semantic conventions. It represents all the +// command arguments (including the command/executable itself) as received by +// the process. On Linux-based systems (and some other Unixoid systems +// supporting procfs), can be set according to the list of null-delimited +// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, +// this would be the full argv vector passed to `main`. +func ProcessCommandArgs(val ...string) attribute.KeyValue { + return ProcessCommandArgsKey.StringSlice(val) +} + +// ProcessCommandLine returns an attribute KeyValue conforming to the +// "process.command_line" semantic conventions. It represents the full command +// used to launch the process as a single string representing the full command. +// On Windows, can be set to the result of `GetCommandLineW`.
Do not set this +// if you have to assemble it just for monitoring; use `process.command_args` +// instead. +func ProcessCommandLine(val string) attribute.KeyValue { + return ProcessCommandLineKey.String(val) +} + +// ProcessCreationTime returns an attribute KeyValue conforming to the +// "process.creation.time" semantic conventions. It represents the date and +// time the process was created, in ISO 8601 format. +func ProcessCreationTime(val string) attribute.KeyValue { + return ProcessCreationTimeKey.String(val) +} + +// ProcessExecutableName returns an attribute KeyValue conforming to the +// "process.executable.name" semantic conventions. It represents the name of +// the process executable. On Linux based systems, can be set to the `Name` in +// `proc/[pid]/status`. On Windows, can be set to the base name of +// `GetProcessImageFileNameW`. +func ProcessExecutableName(val string) attribute.KeyValue { + return ProcessExecutableNameKey.String(val) +} + +// ProcessExecutablePath returns an attribute KeyValue conforming to the +// "process.executable.path" semantic conventions. It represents the full path +// to the process executable. On Linux based systems, can be set to the target +// of `proc/[pid]/exe`. On Windows, can be set to the result of +// `GetProcessImageFileNameW`. +func ProcessExecutablePath(val string) attribute.KeyValue { + return ProcessExecutablePathKey.String(val) +} + +// ProcessExitCode returns an attribute KeyValue conforming to the +// "process.exit.code" semantic conventions. It represents the exit code of the +// process. +func ProcessExitCode(val int) attribute.KeyValue { + return ProcessExitCodeKey.Int(val) +} + +// ProcessExitTime returns an attribute KeyValue conforming to the +// "process.exit.time" semantic conventions. It represents the date and time +// the process exited, in ISO 8601 format. +func ProcessExitTime(val string) attribute.KeyValue { + return ProcessExitTimeKey.String(val) +} + +// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the +// "process.group_leader.pid" semantic conventions. It represents the PID of +// the process's group leader. This is also the process group ID (PGID) of the +// process. +func ProcessGroupLeaderPID(val int) attribute.KeyValue { + return ProcessGroupLeaderPIDKey.Int(val) +} + +// ProcessInteractive returns an attribute KeyValue conforming to the +// "process.interactive" semantic conventions. It represents whether the +// process is connected to an interactive shell. +func ProcessInteractive(val bool) attribute.KeyValue { + return ProcessInteractiveKey.Bool(val) +} + +// ProcessOwner returns an attribute KeyValue conforming to the +// "process.owner" semantic conventions. It represents the username of the user +// that owns the process. +func ProcessOwner(val string) attribute.KeyValue { + return ProcessOwnerKey.String(val) +} + +// ProcessParentPID returns an attribute KeyValue conforming to the +// "process.parent_pid" semantic conventions. It represents the parent Process +// identifier (PPID). +func ProcessParentPID(val int) attribute.KeyValue { + return ProcessParentPIDKey.Int(val) +} + +// ProcessPID returns an attribute KeyValue conforming to the "process.pid" +// semantic conventions. It represents the process identifier (PID). +func ProcessPID(val int) attribute.KeyValue { + return ProcessPIDKey.Int(val) +} + +// ProcessRealUserID returns an attribute KeyValue conforming to the +// "process.real_user.id" semantic conventions. It represents the real user ID +// (RUID) of the process.
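+//
+// A minimal usage sketch (illustrative; assumes this package is imported as
+// semconv, the standard library os package is imported, and span is an
+// active trace.Span):
+//
+//	span.SetAttributes(
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessRealUserID(os.Getuid()),
+//	)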
+func ProcessRealUserID(val int) attribute.KeyValue { + return ProcessRealUserIDKey.Int(val) +} + +// ProcessRealUserName returns an attribute KeyValue conforming to the +// "process.real_user.name" semantic conventions. It represents the username of +// the real user of the process. +func ProcessRealUserName(val string) attribute.KeyValue { + return ProcessRealUserNameKey.String(val) +} + +// ProcessRuntimeDescription returns an attribute KeyValue conforming to the +// "process.runtime.description" semantic conventions. It represents an +// additional description about the runtime of the process, for example a +// specific vendor customization of the runtime environment. +func ProcessRuntimeDescription(val string) attribute.KeyValue { + return ProcessRuntimeDescriptionKey.String(val) +} + +// ProcessRuntimeName returns an attribute KeyValue conforming to the +// "process.runtime.name" semantic conventions. It represents the name of the +// runtime of this process. For compiled native binaries, this SHOULD be the +// name of the compiler. +func ProcessRuntimeName(val string) attribute.KeyValue { + return ProcessRuntimeNameKey.String(val) +} + +// ProcessRuntimeVersion returns an attribute KeyValue conforming to the +// "process.runtime.version" semantic conventions. It represents the version of +// the runtime of this process, as returned by the runtime without +// modification. +func ProcessRuntimeVersion(val string) attribute.KeyValue { + return ProcessRuntimeVersionKey.String(val) +} + +// ProcessSavedUserID returns an attribute KeyValue conforming to the +// "process.saved_user.id" semantic conventions. It represents the saved user +// ID (SUID) of the process. +func ProcessSavedUserID(val int) attribute.KeyValue { + return ProcessSavedUserIDKey.Int(val) +} + +// ProcessSavedUserName returns an attribute KeyValue conforming to the +// "process.saved_user.name" semantic conventions. It represents the username +// of the saved user. +func ProcessSavedUserName(val string) attribute.KeyValue { + return ProcessSavedUserNameKey.String(val) +} + +// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the +// "process.session_leader.pid" semantic conventions. It represents the PID of +// the process's session leader. This is also the session ID (SID) of the +// process. +func ProcessSessionLeaderPID(val int) attribute.KeyValue { + return ProcessSessionLeaderPIDKey.Int(val) +} + +// ProcessUserID returns an attribute KeyValue conforming to the +// "process.user.id" semantic conventions. It represents the effective user ID +// (EUID) of the process. +func ProcessUserID(val int) attribute.KeyValue { + return ProcessUserIDKey.Int(val) +} + +// ProcessUserName returns an attribute KeyValue conforming to the +// "process.user.name" semantic conventions. It represents the username of the +// effective user of the process. +func ProcessUserName(val string) attribute.KeyValue { + return ProcessUserNameKey.String(val) +} + +// ProcessVpid returns an attribute KeyValue conforming to the +// "process.vpid" semantic conventions. It represents the virtual process +// identifier. +func ProcessVpid(val int) attribute.KeyValue { + return ProcessVpidKey.Int(val) +} + +// Attributes for process CPU +const ( + // ProcessCPUStateKey is the attribute Key conforming to the + // "process.cpu.state" semantic conventions. It represents the CPU state of + // the process. 
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + ProcessCPUStateKey = attribute.Key("process.cpu.state") +) + +var ( + // system + ProcessCPUStateSystem = ProcessCPUStateKey.String("system") + // user + ProcessCPUStateUser = ProcessCPUStateKey.String("user") + // wait + ProcessCPUStateWait = ProcessCPUStateKey.String("wait") +) + +// Attributes for remote procedure calls. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") + + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions. It represents the [numeric + // status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of response if it is an error response. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be + // cast to string for simplicity. Use empty string in case of `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 + // doesn't specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCMessageCompressedSizeKey is the attribute Key conforming to the + // "rpc.message.compressed_size" semantic conventions. It represents the + // compressed size of the message in bytes. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size") + + // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id" + // semantic conventions. It represents the message ID, which MUST be + // calculated as two different counters starting from `1`, one for sent + // messages and one for received messages. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Note: This way we guarantee that the values will be consistent between + // different implementations. + RPCMessageIDKey = attribute.Key("rpc.message.id") + + // RPCMessageTypeKey is the attribute Key conforming to the + // "rpc.message.type" semantic conventions. It represents whether this + // is a received or sent message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCMessageTypeKey = attribute.Key("rpc.message.type") + + // RPCMessageUncompressedSizeKey is the attribute Key conforming to the + // "rpc.message.uncompressed_size" semantic conventions. It represents the + // uncompressed size of the message in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called, must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers.
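+ //
+ // A minimal sketch combining the RPC attributes on a client span
+ // (illustrative; assumes this package is imported as semconv and span is
+ // an active trace.Span; the values echo the examples in the docs above):
+ //
+ //	span.SetAttributes(
+ //		semconv.RPCSystemGRPC,
+ //		semconv.RPCService("myservice.EchoService"),
+ //		semconv.RPCMethod("exampleMethod"),
+ //	)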
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + RPCSystemKey = attribute.Key("rpc.system") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +var ( + // sent + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + // received + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = 
RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of request or response. Since protocol allows id to be int, string, +// `null` or missing (for notifications), value is expected to be cast to +// string for simplicity. Use empty string in case of `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 +// doesn't specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCMessageCompressedSize returns an attribute KeyValue conforming to the +// "rpc.message.compressed_size" semantic conventions. It represents the +// compressed size of the message in bytes. +func RPCMessageCompressedSize(val int) attribute.KeyValue { + return RPCMessageCompressedSizeKey.Int(val) +} + +// RPCMessageID returns an attribute KeyValue conforming to the +// "rpc.message.id" semantic conventions. It represents the message ID, which +// MUST be calculated as two different counters starting from `1`, one for +// sent messages and one for received messages. +func RPCMessageID(val int) attribute.KeyValue { + return RPCMessageIDKey.Int(val) +} + +// RPCMessageUncompressedSize returns an attribute KeyValue conforming to +// the "rpc.message.uncompressed_size" semantic conventions. It represents the +// uncompressed size of the message in bytes. +func RPCMessageUncompressedSize(val int) attribute.KeyValue { + return RPCMessageUncompressedSizeKey.Int(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called, must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// These attributes may be used to describe the server in a connection-based +// network interaction where there is one side that initiates the connection +// (the client is the side that initiates the connection).
This covers all TCP +// network interactions since TCP is connection-based and one side initiates +// the connection (an exception is made for peer-to-peer communication over TCP +// where the "user-facing" surface of the protocol / API doesn't expose a clear +// notion of client and server). This also covers UDP network interactions +// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. +const ( + // ServerAddressKey is the attribute Key conforming to the "server.address" + // semantic conventions. It represents the server domain name if available + // without reverse DNS lookup; otherwise, IP address or Unix domain socket + // name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.address` SHOULD represent the server address + // behind any intermediaries, for example proxies, if it's available. + ServerAddressKey = attribute.Key("server.address") + + // ServerPortKey is the attribute Key conforming to the "server.port" + // semantic conventions. It represents the server port number. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + // Note: When observed from the client side, and when communicating through + // an intermediary, `server.port` SHOULD represent the server port behind + // any intermediaries, for example proxies, if it's available. + ServerPortKey = attribute.Key("server.port") +) + +// ServerAddress returns an attribute KeyValue conforming to the +// "server.address" semantic conventions. It represents the server domain name +// if available without reverse DNS lookup; otherwise, IP address or Unix +// domain socket name. +func ServerAddress(val string) attribute.KeyValue { + return ServerAddressKey.String(val) +} + +// ServerPort returns an attribute KeyValue conforming to the "server.port" +// semantic conventions. It represents the server port number. +func ServerPort(val int) attribute.KeyValue { + return ServerPortKey.Int(val) +} + +// A service instance. +const ( + // ServiceInstanceIDKey is the attribute Key conforming to the + // "service.instance.id" semantic conventions. It represents the string ID + // of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be + // globally unique). The ID helps to + // distinguish instances of the same service that exist at the same time + // (e.g. instances of a horizontally scaled + // service). + // + // Implementations, such as SDKs, are recommended to generate a random + // Version 1 or Version 4 [RFC + // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an + // inherent unique ID as the source of + // this value if stability is desirable. In that case, the ID SHOULD be + // used as source of a UUID Version 5 and + // SHOULD use the following UUID as the namespace: + // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`. + // + // UUIDs are typically recommended, as only an opaque value for the + // purposes of identifying a service instance is + // needed. 
Similar to what can be seen in the man page for the + // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html) + // file, the underlying + // data, such as pod name and namespace, should be treated as confidential; + // it is the user's choice to expose it + // or not via another resource attribute. + // + // For applications running behind an application server (like unicorn), we + // do not recommend using one identifier + // for all processes participating in the application. Instead, it's + // recommended that each division (e.g. a worker + // thread in unicorn) have its own instance.id. + // + // It's not recommended for a Collector to set `service.instance.id` if it + // can't unambiguously determine the + // service instance that is generating that telemetry. For instance, + // creating a UUID based on `pod.name` will + // likely be wrong, as the Collector might not know from which container + // within that pod the telemetry originated. + // However, Collectors can set the `service.instance.id` if they can + // unambiguously determine the service instance + // for that telemetry. This is typically the case for scraping receivers, + // as they know the target address and + // port. + ServiceInstanceIDKey = attribute.Key("service.instance.id") + + // ServiceNameKey is the attribute Key conforming to the "service.name" + // semantic conventions. It represents the logical name of the service. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled + // services. If the value was not specified, SDKs MUST fall back to + // `unknown_service:` concatenated with + // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If + // `process.executable.name` is not available, the value MUST be set to + // `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + + // ServiceNamespaceKey is the attribute Key conforming to the + // "service.namespace" semantic conventions. It represents a namespace for + // `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group + // of services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` + // is expected to be unique for all services that have no explicit + // namespace defined (so the empty/unspecified namespace is simply one more + // valid namespace). Zero-length namespace string is assumed equal to + // unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + + // ServiceVersionKey is the attribute Key conforming to the + // "service.version" semantic conventions. It represents the version string + // of the service API or implementation. The format is not defined by these + // conventions. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0', 'a01dbef8a' + ServiceVersionKey = attribute.Key("service.version") +) + +// ServiceInstanceID returns an attribute KeyValue conforming to the +// "service.instance.id" semantic conventions. It represents the string ID of +// the service instance.
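+//
+// A minimal sketch of setting the service identity on a resource
+// (illustrative; assumes this package is imported as semconv, that
+// "go.opentelemetry.io/otel/sdk/resource" is imported as resource, and that
+// the package-level SchemaURL constant exists as in other semconv releases):
+//
+//	res := resource.NewWithAttributes(semconv.SchemaURL,
+//		semconv.ServiceName("shoppingcart"),
+//		semconv.ServiceVersion("2.0.0"),
+//		semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//	)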
+func ServiceInstanceID(val string) attribute.KeyValue { + return ServiceInstanceIDKey.String(val) +} + +// ServiceName returns an attribute KeyValue conforming to the +// "service.name" semantic conventions. It represents the logical name of the +// service. +func ServiceName(val string) attribute.KeyValue { + return ServiceNameKey.String(val) +} + +// ServiceNamespace returns an attribute KeyValue conforming to the +// "service.namespace" semantic conventions. It represents a namespace for +// `service.name`. +func ServiceNamespace(val string) attribute.KeyValue { + return ServiceNamespaceKey.String(val) +} + +// ServiceVersion returns an attribute KeyValue conforming to the +// "service.version" semantic conventions. It represents the version string of +// the service API or implementation. The format is not defined by these +// conventions. +func ServiceVersion(val string) attribute.KeyValue { + return ServiceVersionKey.String(val) +} + +// Session is defined as the period of time encompassing all activities +// performed by the application and the actions executed by the end user. +// Consequently, a Session is represented as a collection of Logs, Events, and +// Spans emitted by the Client Application throughout the Session's duration. +// Each Session is assigned a unique identifier, which is included as an +// attribute in the Logs, Events, and Spans generated during the Session's +// lifecycle. +// When a session reaches end of life, typically due to user inactivity or +// session timeout, a new session identifier will be assigned. The previous +// session identifier may be provided by the instrumentation so that telemetry +// backends can link the two sessions. +const ( + // SessionIDKey is the attribute Key conforming to the "session.id" + // semantic conventions. It represents a unique id to identify a session. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionIDKey = attribute.Key("session.id") + + // SessionPreviousIDKey is the attribute Key conforming to the + // "session.previous_id" semantic conventions. It represents the previous + // `session.id` for this user, when known. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '00112233-4455-6677-8899-aabbccddeeff' + SessionPreviousIDKey = attribute.Key("session.previous_id") +) + +// SessionID returns an attribute KeyValue conforming to the "session.id" +// semantic conventions. It represents a unique id to identify a session. +func SessionID(val string) attribute.KeyValue { + return SessionIDKey.String(val) +} + +// SessionPreviousID returns an attribute KeyValue conforming to the +// "session.previous_id" semantic conventions. It represents the previous +// `session.id` for this user, when known. +func SessionPreviousID(val string) attribute.KeyValue { + return SessionPreviousIDKey.String(val) +} + +// SignalR attributes +const ( + // SignalrConnectionStatusKey is the attribute Key conforming to the + // "signalr.connection.status" semantic conventions. It represents the + // signalR HTTP connection closure status. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'app_shutdown', 'timeout' + SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") + + // SignalrTransportKey is the attribute Key conforming to the + // "signalr.transport" semantic conventions. 
It represents the [SignalR + // transport + // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'web_sockets', 'long_polling' + SignalrTransportKey = attribute.Key("signalr.transport") +) + +var ( + // The connection was closed normally + SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") + // The connection was closed due to a timeout + SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") + // The connection was closed because the app is shutting down + SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") +) + +var ( + // ServerSentEvents protocol + SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") + // LongPolling protocol + SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") + // WebSockets protocol + SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") +) + +// These attributes may be used to describe the sender of a network +// exchange/packet. These should be used when there is no client/server +// relationship between the two sides, or when that relationship is unknown. +// This covers low-level network interactions (e.g. packet tracing) where you +// don't know if there was a connection or which side initiated it. This also +// covers unidirectional UDP flows and peer-to-peer communication where the +// "user-facing" surface of the protocol / API doesn't expose a clear notion of +// client and server. +const ( + // SourceAddressKey is the attribute Key conforming to the "source.address" + // semantic conventions. It represents the source address - domain name if + // available without reverse DNS lookup; otherwise, IP address or Unix + // domain socket name. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' + // Note: When observed from the destination side, and when communicating + // through an intermediary, `source.address` SHOULD represent the source + // address behind any intermediaries, for example proxies, if it's + // available. + SourceAddressKey = attribute.Key("source.address") + + // SourcePortKey is the attribute Key conforming to the "source.port" + // semantic conventions. It represents the source port number + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 3389, 2888 + SourcePortKey = attribute.Key("source.port") +) + +// SourceAddress returns an attribute KeyValue conforming to the +// "source.address" semantic conventions. It represents the source address - +// domain name if available without reverse DNS lookup; otherwise, IP address +// or Unix domain socket name. +func SourceAddress(val string) attribute.KeyValue { + return SourceAddressKey.String(val) +} + +// SourcePort returns an attribute KeyValue conforming to the "source.port" +// semantic conventions. It represents the source port number +func SourcePort(val int) attribute.KeyValue { + return SourcePortKey.Int(val) +} + +// Describes System attributes +const ( + // SystemDeviceKey is the attribute Key conforming to the "system.device" + // semantic conventions. 
It represents the device identifier + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '(identifier)' + SystemDeviceKey = attribute.Key("system.device") +) + +// SystemDevice returns an attribute KeyValue conforming to the +// "system.device" semantic conventions. It represents the device identifier +func SystemDevice(val string) attribute.KeyValue { + return SystemDeviceKey.String(val) +} + +// Describes System CPU attributes +const ( + // SystemCPULogicalNumberKey is the attribute Key conforming to the + // "system.cpu.logical_number" semantic conventions. It represents the + // logical CPU number [0..n-1] + // + // Type: int + // RequirementLevel: Optional + // Stability: experimental + // Examples: 1 + SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") + + // SystemCPUStateKey is the attribute Key conforming to the + // "system.cpu.state" semantic conventions. It represents the state of the + // CPU + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'idle', 'interrupt' + SystemCPUStateKey = attribute.Key("system.cpu.state") +) + +var ( + // user + SystemCPUStateUser = SystemCPUStateKey.String("user") + // system + SystemCPUStateSystem = SystemCPUStateKey.String("system") + // nice + SystemCPUStateNice = SystemCPUStateKey.String("nice") + // idle + SystemCPUStateIdle = SystemCPUStateKey.String("idle") + // iowait + SystemCPUStateIowait = SystemCPUStateKey.String("iowait") + // interrupt + SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") + // steal + SystemCPUStateSteal = SystemCPUStateKey.String("steal") +) + +// SystemCPULogicalNumber returns an attribute KeyValue conforming to the +// "system.cpu.logical_number" semantic conventions. It represents the logical +// CPU number [0..n-1] +func SystemCPULogicalNumber(val int) attribute.KeyValue { + return SystemCPULogicalNumberKey.Int(val) +} + +// Describes System Memory attributes +const ( + // SystemMemoryStateKey is the attribute Key conforming to the + // "system.memory.state" semantic conventions. It represents the memory + // state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free', 'cached' + SystemMemoryStateKey = attribute.Key("system.memory.state") +) + +var ( + // used + SystemMemoryStateUsed = SystemMemoryStateKey.String("used") + // free + SystemMemoryStateFree = SystemMemoryStateKey.String("free") + // shared + SystemMemoryStateShared = SystemMemoryStateKey.String("shared") + // buffers + SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") + // cached + SystemMemoryStateCached = SystemMemoryStateKey.String("cached") +) + +// Describes System Memory Paging attributes +const ( + // SystemPagingDirectionKey is the attribute Key conforming to the + // "system.paging.direction" semantic conventions. It represents the paging + // access direction + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'in' + SystemPagingDirectionKey = attribute.Key("system.paging.direction") + + // SystemPagingStateKey is the attribute Key conforming to the + // "system.paging.state" semantic conventions. 
It represents the memory + // paging state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'free' + SystemPagingStateKey = attribute.Key("system.paging.state") + + // SystemPagingTypeKey is the attribute Key conforming to the + // "system.paging.type" semantic conventions. It represents the memory + // paging type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'minor' + SystemPagingTypeKey = attribute.Key("system.paging.type") +) + +var ( + // in + SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") + // out + SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") +) + +var ( + // used + SystemPagingStateUsed = SystemPagingStateKey.String("used") + // free + SystemPagingStateFree = SystemPagingStateKey.String("free") +) + +var ( + // major + SystemPagingTypeMajor = SystemPagingTypeKey.String("major") + // minor + SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") +) + +// Describes Filesystem attributes +const ( + // SystemFilesystemModeKey is the attribute Key conforming to the + // "system.filesystem.mode" semantic conventions. It represents the + // filesystem mode + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'rw, ro' + SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") + + // SystemFilesystemMountpointKey is the attribute Key conforming to the + // "system.filesystem.mountpoint" semantic conventions. It represents the + // filesystem mount path + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '/mnt/data' + SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") + + // SystemFilesystemStateKey is the attribute Key conforming to the + // "system.filesystem.state" semantic conventions. It represents the + // filesystem state + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'used' + SystemFilesystemStateKey = attribute.Key("system.filesystem.state") + + // SystemFilesystemTypeKey is the attribute Key conforming to the + // "system.filesystem.type" semantic conventions. It represents the + // filesystem type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'ext4' + SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") +) + +var ( + // used + SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") + // free + SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") + // reserved + SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") +) + +var ( + // fat32 + SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") + // exfat + SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") + // ntfs + SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") + // refs + SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") + // hfsplus + SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") + // ext4 + SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") +) + +// SystemFilesystemMode returns an attribute KeyValue conforming to the +// "system.filesystem.mode" semantic conventions. 
It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents the network
+ // connection state (a stateless protocol MUST NOT set this attribute)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is + // used, this SDK MUST set the + // `telemetry.sdk.name` attribute to the fully-qualified class or module + // name of this SDK's main entry point + // or another suitable identifier depending on the language. + // The identifier `opentelemetry` is reserved and MUST NOT be used in this + // case. + // All custom identifiers SHOULD be stable across different versions of an + // implementation. + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + + // TelemetrySDKVersionKey is the attribute Key conforming to the + // "telemetry.sdk.version" semantic conventions. It represents the version + // string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + + // TelemetryDistroNameKey is the attribute Key conforming to the + // "telemetry.distro.name" semantic conventions. It represents the name of + // the auto instrumentation agent or distribution, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'parts-unlimited-java' + // Note: Official auto instrumentation agents and distributions SHOULD set + // the `telemetry.distro.name` attribute to + // a string starting with `opentelemetry-`, e.g. + // `opentelemetry-java-instrumentation`. + TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") + + // TelemetryDistroVersionKey is the attribute Key conforming to the + // "telemetry.distro.version" semantic conventions. It represents the + // version string of the auto instrumentation agent or distribution, if + // used. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '1.2.3' + TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // rust + TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. 
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client.
This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the client. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the client. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of the
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/time
+ // indicating when the client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions.
It represents the
+ // date/time indicating when the client certificate is first considered
+ // valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the server
+ // name indication (SNI): the hostname to which the client is attempting to
+ // connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of the subject of the x.509 certificate presented by
+ // the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from the original string of the
+ // negotiated [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions.
It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded version
+ // of the certificate offered by the server. For consistency with other
+ // hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // version of the certificate offered by the server. For consistency with
+ // other hash values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of the
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/time
+ // indicating when the server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/time indicating when the server certificate is first considered
+ // valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of the subject of the x.509 certificate presented by
+ // the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions.
It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the client. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of the
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
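+
+// Editor's note (illustrative, not part of the generated specification): the
+// helpers in this file only build attribute.KeyValue pairs; attaching them to
+// telemetry is left to the caller. A minimal sketch, assuming a configured
+// OpenTelemetry SDK; the tracer name "example" is a placeholder:
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+//	)
+//
+//	func annotateTLS(ctx context.Context) {
+//		_, span := otel.Tracer("example").Start(ctx, "tls.handshake")
+//		defer span.End()
+//		span.SetAttributes(
+//			semconv.TLSClientServerName("opentelemetry.io"),
+//			semconv.TLSClientNotBefore("1970-01-01T00:00:00.000Z"),
+//			semconv.TLSClientNotAfter("2021-01-01T00:00:00.000Z"),
+//		)
+//	}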
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the server
+// name indication (SNI): the hostname to which the client is attempting to
+// connect.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
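+
+// Editor's note (illustrative, not part of the generated specification): the
+// variadic helpers such as TLSClientSupportedCiphers and
+// TLSServerCertificateChain produce string-slice attributes. A minimal,
+// hypothetical sketch reusing the documented example values:
+//
+//	chain := semconv.TLSServerCertificateChain("MII...", "MI...")
+//	// chain.Value.AsStringSlice() yields []string{"MII...", "MI..."}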
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the
+// certificate offered by the server. For consistency with other hash values,
+// this value should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of the
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2', + // '[1080:0:0:0:8:800:200C:417A]' + // Note: In some cases a URL may refer to an IP and/or port directly, + // without a domain name. In this case, the IP address would go to the + // domain field. If the URL contains a [literal IPv6 + // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by + // `[` and `]`, the `[` and `]` characters should also be captured in the + // domain field. + URLDomainKey = attribute.Key("url.domain") + + // URLExtensionKey is the attribute Key conforming to the "url.extension" + // semantic conventions. It represents the file extension extracted from + // the `url.full`, excluding the leading dot. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'png', 'gz' + // Note: The file extension is only set if it exists, as not every url has + // a file extension. When the file name has multiple extensions + // `example.tar.gz`, only the last one should be captured `gz`, not + // `tar.gz`. + URLExtensionKey = attribute.Key("url.extension") + + // URLFragmentKey is the attribute Key conforming to the "url.fragment" + // semantic conventions. It represents the [URI + // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'SemConv' + URLFragmentKey = attribute.Key("url.fragment") + + // URLFullKey is the attribute Key conforming to the "url.full" semantic + // conventions. It represents the absolute URL describing a network + // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // '//localhost' + // Note: For network calls, URL usually has + // `scheme://host[:port][path][?query][#fragment]` format, where the + // fragment is not transmitted over HTTP, but if it is known, it SHOULD be + // included nevertheless. + // `url.full` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case username and + // password SHOULD be redacted and attribute's value SHOULD be + // `https://REDACTED:REDACTED@www.example.com/`. + // `url.full` SHOULD capture the absolute URL when it is available (or can + // be reconstructed). Sensitive content provided in `url.full` SHOULD be + // scrubbed when instrumentations can identify it. + URLFullKey = attribute.Key("url.full") + + // URLOriginalKey is the attribute Key conforming to the "url.original" + // semantic conventions. It represents the unmodified original URL as seen + // in the event source. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', + // 'search?q=OpenTelemetry' + // Note: In network monitoring, the observed URL may be a full URL, whereas + // in access logs, the URL is often just represented as a path. This field + // is meant to represent the URL as it was observed, complete or not. + // `url.original` might contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case password and + // username SHOULD NOT be redacted and attribute's value SHOULD remain the + // same. 
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered URL domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host
+ // name under the registered_domain. In a partially qualified domain, or if
+ // the qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix; this is the
+ // last part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
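+
+// Editor's note (illustrative, not part of the generated specification): a
+// minimal sketch composing the URL helpers above into a span attribute set,
+// assuming an existing trace.Span value named span; the values mirror the
+// documented examples:
+//
+//	span.SetAttributes(
+//		semconv.URLScheme("https"),
+//		semconv.URLDomain("opentelemetry.io"),
+//		semconv.URLPath("/search"),
+//		semconv.URLQuery("q=OpenTelemetry"),
+//		semconv.URLPort(443),
+//	)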
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name, which includes all of the names except the
+// host name under the registered_domain. In a partially qualified domain, or
+// if the qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix; this is the last part
+// of the domain name. For example, the top level domain for example.com is
+// `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from original. Usually refers to the browser's
+ // version
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string.
In the case of using a user-agent for + // non-browser products, such as microservices with multiple names/versions + // inside the `user_agent.original`, the most significant version SHOULD be + // selected. In such a scenario it should align with `user_agent.name` + UserAgentVersionKey = attribute.Key("user_agent.version") +) + +// UserAgentName returns an attribute KeyValue conforming to the +// "user_agent.name" semantic conventions. It represents the name of the +// user-agent extracted from original. Usually refers to the browser's name. +func UserAgentName(val string) attribute.KeyValue { + return UserAgentNameKey.String(val) +} + +// UserAgentOriginal returns an attribute KeyValue conforming to the +// "user_agent.original" semantic conventions. It represents the value of the +// [HTTP +// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) +// header sent by the client. +func UserAgentOriginal(val string) attribute.KeyValue { + return UserAgentOriginalKey.String(val) +} + +// UserAgentVersion returns an attribute KeyValue conforming to the +// "user_agent.version" semantic conventions. It represents the version of the +// user-agent extracted from original. Usually refers to the browser's version +func UserAgentVersion(val string) attribute.KeyValue { + return UserAgentVersionKey.String(val) +} + +// The attributes used to describe the packaged software running the +// application code. +const ( + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - + // 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") + + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: experimental + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") +) + +// WebEngineDescription returns an attribute KeyValue conforming to the +// "webengine.description" semantic conventions. It represents the additional +// description of the web engine (e.g. detailed version and edition +// information). +func WebEngineDescription(val string) attribute.KeyValue { + return WebEngineDescriptionKey.String(val) +} + +// WebEngineName returns an attribute KeyValue conforming to the +// "webengine.name" semantic conventions. It represents the name of the web +// engine. +func WebEngineName(val string) attribute.KeyValue { + return WebEngineNameKey.String(val) +} + +// WebEngineVersion returns an attribute KeyValue conforming to the +// "webengine.version" semantic conventions. It represents the version of the +// web engine. 
+func WebEngineVersion(val string) attribute.KeyValue { + return WebEngineVersionKey.String(val) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go similarity index 96% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go index d27e8a8f..d031bbea 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go @@ -4,6 +4,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.24.0 +// patterns for OpenTelemetry things. This package represents the v1.26.0 // version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go similarity index 98% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go index 7235bb51..bfaee0d5 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" const ( // ExceptionEventName is the name of the Span event representing an exception. diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go similarity index 77% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go index a6b953f6..fcdb9f48 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go @@ -3,109 +3,259 @@ // Code generated from semantic convention specification. DO NOT EDIT. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" const ( - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the number + // ContainerCPUTime is the metric conforming to the "container.cpu.time" + // semantic conventions. It represents the total CPU time consumed. + // Instrument: counter + // Unit: s + // Stability: Experimental + ContainerCPUTimeName = "container.cpu.time" + ContainerCPUTimeUnit = "s" + ContainerCPUTimeDescription = "Total CPU time consumed" + + // ContainerMemoryUsage is the metric conforming to the + // "container.memory.usage" semantic conventions. It represents the memory + // usage of the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerMemoryUsageName = "container.memory.usage" + ContainerMemoryUsageUnit = "By" + ContainerMemoryUsageDescription = "Memory usage of the container." + + // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic + // conventions. It represents the disk bytes for the container. 
+ // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerDiskIoName = "container.disk.io" + ContainerDiskIoUnit = "By" + ContainerDiskIoDescription = "Disk bytes for the container." + + // ContainerNetworkIo is the metric conforming to the "container.network.io" + // semantic conventions. It represents the network bytes for the container. + // Instrument: counter + // Unit: By + // Stability: Experimental + ContainerNetworkIoName = "container.network.io" + ContainerNetworkIoUnit = "By" + ContainerNetworkIoDescription = "Network bytes for the container." + + // DBClientOperationDuration is the metric conforming to the + // "db.client.operation.duration" semantic conventions. It represents the + // duration of database client operations. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientOperationDurationName = "db.client.operation.duration" + DBClientOperationDurationUnit = "s" + DBClientOperationDurationDescription = "Duration of database client operations." + + // DBClientConnectionCount is the metric conforming to the + // "db.client.connection.count" semantic conventions. It represents the number // of connections that are currently in state described by the `state` // attribute. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental + DBClientConnectionCountName = "db.client.connection.count" + DBClientConnectionCountUnit = "{connection}" + DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute" + + // DBClientConnectionIdleMax is the metric conforming to the + // "db.client.connection.idle.max" semantic conventions. It represents the + // maximum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMaxName = "db.client.connection.idle.max" + DBClientConnectionIdleMaxUnit = "{connection}" + DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed" + + // DBClientConnectionIdleMin is the metric conforming to the + // "db.client.connection.idle.min" semantic conventions. It represents the + // minimum number of idle open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionIdleMinName = "db.client.connection.idle.min" + DBClientConnectionIdleMinUnit = "{connection}" + DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed" + + // DBClientConnectionMax is the metric conforming to the + // "db.client.connection.max" semantic conventions. It represents the maximum + // number of open connections allowed. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + DBClientConnectionMaxName = "db.client.connection.max" + DBClientConnectionMaxUnit = "{connection}" + DBClientConnectionMaxDescription = "The maximum number of open connections allowed" + + // DBClientConnectionPendingRequests is the metric conforming to the + // "db.client.connection.pending_requests" semantic conventions. It represents + // the number of pending requests for an open connection, cumulative for the + // entire pool. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests" + DBClientConnectionPendingRequestsUnit = "{request}" + DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + + // DBClientConnectionTimeouts is the metric conforming to the + // "db.client.connection.timeouts" semantic conventions. It represents the + // number of connection timeouts that have occurred trying to obtain a + // connection from the pool. + // Instrument: counter + // Unit: {timeout} + // Stability: Experimental + DBClientConnectionTimeoutsName = "db.client.connection.timeouts" + DBClientConnectionTimeoutsUnit = "{timeout}" + DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + + // DBClientConnectionCreateTime is the metric conforming to the + // "db.client.connection.create_time" semantic conventions. It represents the + // time it took to create a new connection. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionCreateTimeName = "db.client.connection.create_time" + DBClientConnectionCreateTimeUnit = "s" + DBClientConnectionCreateTimeDescription = "The time it took to create a new connection" + + // DBClientConnectionWaitTime is the metric conforming to the + // "db.client.connection.wait_time" semantic conventions. It represents the + // time it took to obtain an open connection from the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionWaitTimeName = "db.client.connection.wait_time" + DBClientConnectionWaitTimeUnit = "s" + DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool" + + // DBClientConnectionUseTime is the metric conforming to the + // "db.client.connection.use_time" semantic conventions. It represents the time + // between borrowing a connection and returning it to the pool. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DBClientConnectionUseTimeName = "db.client.connection.use_time" + DBClientConnectionUseTimeUnit = "s" + DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + + // DBClientConnectionsUsage is the metric conforming to the + // "db.client.connections.usage" semantic conventions. It represents the + // deprecated, use `db.client.connection.count` instead. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental DBClientConnectionsUsageName = "db.client.connections.usage" DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" + DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead." // DBClientConnectionsIdleMax is the metric conforming to the // "db.client.connections.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. + // deprecated, use `db.client.connection.idle.max` instead. 
// Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" + DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead." // DBClientConnectionsIdleMin is the metric conforming to the // "db.client.connections.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. + // deprecated, use `db.client.connection.idle.min` instead. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsIdleMinName = "db.client.connections.idle.min" DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" + DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead." // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the maximum - // number of open connections allowed. + // "db.client.connections.max" semantic conventions. It represents the + // deprecated, use `db.client.connection.max` instead. // Instrument: updowncounter // Unit: {connection} // Stability: Experimental DBClientConnectionsMaxName = "db.client.connections.max" DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" + DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead." // DBClientConnectionsPendingRequests is the metric conforming to the // "db.client.connections.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. + // the deprecated, use `db.client.connection.pending_requests` instead. // Instrument: updowncounter // Unit: {request} // Stability: Experimental DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" + DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead." // DBClientConnectionsTimeouts is the metric conforming to the // "db.client.connections.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. + // deprecated, use `db.client.connection.timeouts` instead. // Instrument: counter // Unit: {timeout} // Stability: Experimental DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" + DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead." // DBClientConnectionsCreateTime is the metric conforming to the // "db.client.connections.create_time" semantic conventions. It represents the - // time it took to create a new connection. + // deprecated, use `db.client.connection.create_time` instead. Note: the unit + // also changed from `ms` to `s`. 
// Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsCreateTimeName = "db.client.connections.create_time" DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" + DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`." // DBClientConnectionsWaitTime is the metric conforming to the // "db.client.connections.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. + // deprecated, use `db.client.connection.wait_time` instead. Note: the unit + // also changed from `ms` to `s`. // Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" + DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`." // DBClientConnectionsUseTime is the metric conforming to the // "db.client.connections.use_time" semantic conventions. It represents the - // time between borrowing a connection and returning it to the pool. + // deprecated, use `db.client.connection.use_time` instead. Note: the unit also + // changed from `ms` to `s`. // Instrument: histogram // Unit: ms // Stability: Experimental DBClientConnectionsUseTimeName = "db.client.connections.use_time" DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" + DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`." + + // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" + // semantic conventions. It represents the measures the time taken to perform a + // DNS lookup. + // Instrument: histogram + // Unit: s + // Stability: Experimental + DNSLookupDurationName = "dns.lookup.duration" + DNSLookupDurationUnit = "s" + DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." // AspnetcoreRoutingMatchAttempts is the metric conforming to the // "aspnetcore.routing.match_attempts" semantic conventions. It represents the // number of requests that were attempted to be matched to an endpoint. // Instrument: counter // Unit: {match_attempt} - // Stability: Experimental + // Stability: Stable AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." @@ -115,7 +265,7 @@ const ( // number of exceptions caught by exception handling middleware. // Instrument: counter // Unit: {exception} - // Stability: Experimental + // Stability: Stable AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" AspnetcoreDiagnosticsExceptionsUnit = "{exception}" AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." @@ -126,7 +276,7 @@ const ( // that hold a rate limiting lease. 
// Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." @@ -137,7 +287,7 @@ const ( // server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." @@ -148,7 +298,7 @@ const ( // limiting lease. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." @@ -159,7 +309,7 @@ const ( // acquire a rate limiting lease. // Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." @@ -169,69 +319,17 @@ const ( // number of requests that tried to acquire a rate limiting lease. // Instrument: counter // Unit: {request} - // Stability: Experimental + // Stability: Stable AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" AspnetcoreRateLimitingRequestsUnit = "{request}" AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." - - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. 
- // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // HTTPClientRequestTimeInQueue is the metric conforming to the - // "http.client.request.time_in_queue" semantic conventions. It represents the - // amount of time requests spent on a queue waiting for an available - // connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" - HTTPClientRequestTimeInQueueUnit = "s" - HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." - // KestrelActiveConnections is the metric conforming to the // "kestrel.active_connections" semantic conventions. It represents the number // of connections that are currently active on the server. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelActiveConnectionsName = "kestrel.active_connections" KestrelActiveConnectionsUnit = "{connection}" KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." @@ -241,7 +339,7 @@ const ( // duration of connections on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable KestrelConnectionDurationName = "kestrel.connection.duration" KestrelConnectionDurationUnit = "s" KestrelConnectionDurationDescription = "The duration of connections on the server." @@ -251,7 +349,7 @@ const ( // number of connections rejected by the server. // Instrument: counter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelRejectedConnectionsName = "kestrel.rejected_connections" KestrelRejectedConnectionsUnit = "{connection}" KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." @@ -261,7 +359,7 @@ const ( // of connections that are currently queued and are waiting to start. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelQueuedConnectionsName = "kestrel.queued_connections" KestrelQueuedConnectionsUnit = "{connection}" KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." @@ -272,7 +370,7 @@ const ( // currently queued and are waiting to start. // Instrument: updowncounter // Unit: {request} - // Stability: Experimental + // Stability: Stable KestrelQueuedRequestsName = "kestrel.queued_requests" KestrelQueuedRequestsUnit = "{request}" KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." @@ -282,7 +380,7 @@ const ( // number of connections that are currently upgraded (WebSockets). . 
// Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" KestrelUpgradedConnectionsUnit = "{connection}" KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." @@ -292,7 +390,7 @@ const ( // duration of TLS handshakes on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" KestrelTLSHandshakeDurationUnit = "s" KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." @@ -302,7 +400,7 @@ const ( // number of TLS handshakes that are currently in progress on the server. // Instrument: updowncounter // Unit: {handshake} - // Stability: Experimental + // Stability: Stable KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" KestrelActiveTLSHandshakesUnit = "{handshake}" KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." @@ -312,7 +410,7 @@ const ( // duration of connections on the server. // Instrument: histogram // Unit: s - // Stability: Experimental + // Stability: Stable SignalrServerConnectionDurationName = "signalr.server.connection.duration" SignalrServerConnectionDurationUnit = "s" SignalrServerConnectionDurationDescription = "The duration of connections on the server." @@ -322,7 +420,7 @@ const ( // number of connections that are currently active on the server. // Instrument: updowncounter // Unit: {connection} - // Stability: Experimental + // Stability: Stable SignalrServerActiveConnectionsName = "signalr.server.active_connections" SignalrServerActiveConnectionsUnit = "{connection}" SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." @@ -481,6 +579,37 @@ const ( HTTPClientResponseBodySizeUnit = "By" HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." + // HTTPClientOpenConnections is the metric conforming to the + // "http.client.open_connections" semantic conventions. It represents the + // number of outbound HTTP connections that are currently active or idle on the + // client. + // Instrument: updowncounter + // Unit: {connection} + // Stability: Experimental + HTTPClientOpenConnectionsName = "http.client.open_connections" + HTTPClientOpenConnectionsUnit = "{connection}" + HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." + + // HTTPClientConnectionDuration is the metric conforming to the + // "http.client.connection.duration" semantic conventions. It represents the + // duration of the successfully established outbound HTTP connections. + // Instrument: histogram + // Unit: s + // Stability: Experimental + HTTPClientConnectionDurationName = "http.client.connection.duration" + HTTPClientConnectionDurationUnit = "s" + HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." + + // HTTPClientActiveRequests is the metric conforming to the + // "http.client.active_requests" semantic conventions. It represents the number + // of active HTTP requests. 
+ // Instrument: updowncounter + // Unit: {request} + // Stability: Experimental + HTTPClientActiveRequestsName = "http.client.active_requests" + HTTPClientActiveRequestsUnit = "{request}" + HTTPClientActiveRequestsDescription = "Number of active HTTP requests." + // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic // conventions. It represents the measure of initial memory requested. // Instrument: updowncounter @@ -673,15 +802,15 @@ const ( MessagingReceiveDurationUnit = "s" MessagingReceiveDurationDescription = "Measures the duration of receive operation." - // MessagingDeliverDuration is the metric conforming to the - // "messaging.deliver.duration" semantic conventions. It represents the - // measures the duration of deliver operation. + // MessagingProcessDuration is the metric conforming to the + // "messaging.process.duration" semantic conventions. It represents the + // measures the duration of process operation. // Instrument: histogram // Unit: s // Stability: Experimental - MessagingDeliverDurationName = "messaging.deliver.duration" - MessagingDeliverDurationUnit = "s" - MessagingDeliverDurationDescription = "Measures the duration of deliver operation." + MessagingProcessDurationName = "messaging.process.duration" + MessagingProcessDurationUnit = "s" + MessagingProcessDurationDescription = "Measures the duration of process operation." // MessagingPublishMessages is the metric conforming to the // "messaging.publish.messages" semantic conventions. It represents the @@ -703,15 +832,112 @@ const ( MessagingReceiveMessagesUnit = "{message}" MessagingReceiveMessagesDescription = "Measures the number of received messages." - // MessagingDeliverMessages is the metric conforming to the - // "messaging.deliver.messages" semantic conventions. It represents the - // measures the number of delivered messages. + // MessagingProcessMessages is the metric conforming to the + // "messaging.process.messages" semantic conventions. It represents the + // measures the number of processed messages. // Instrument: counter // Unit: {message} // Stability: Experimental - MessagingDeliverMessagesName = "messaging.deliver.messages" - MessagingDeliverMessagesUnit = "{message}" - MessagingDeliverMessagesDescription = "Measures the number of delivered messages." + MessagingProcessMessagesName = "messaging.process.messages" + MessagingProcessMessagesUnit = "{message}" + MessagingProcessMessagesDescription = "Measures the number of processed messages." + + // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic + // conventions. It represents the total CPU seconds broken down by different + // states. + // Instrument: counter + // Unit: s + // Stability: Experimental + ProcessCPUTimeName = "process.cpu.time" + ProcessCPUTimeUnit = "s" + ProcessCPUTimeDescription = "Total CPU seconds broken down by different states." + + // ProcessCPUUtilization is the metric conforming to the + // "process.cpu.utilization" semantic conventions. It represents the difference + // in process.cpu.time since the last measurement, divided by the elapsed time + // and number of CPUs available to the process. + // Instrument: gauge + // Unit: 1 + // Stability: Experimental + ProcessCPUUtilizationName = "process.cpu.utilization" + ProcessCPUUtilizationUnit = "1" + ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process." 
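	// A minimal usage sketch for these generated Name/Unit/Description
	// triples (a hedged example, assuming the go.opentelemetry.io/otel and
	// go.opentelemetry.io/otel/metric packages; "example-meter" and the
	// recorded value are placeholders):
	//
	//	meter := otel.Meter("example-meter")
	//	cpuTime, err := meter.Float64Counter(
	//		ProcessCPUTimeName,
	//		metric.WithUnit(ProcessCPUTimeUnit),
	//		metric.WithDescription(ProcessCPUTimeDescription),
	//	)
	//	if err == nil {
	//		cpuTime.Add(ctx, 0.25) // CPU seconds observed since the last Add
	//	}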
+ + // ProcessMemoryUsage is the metric conforming to the "process.memory.usage" + // semantic conventions. It represents the amount of physical memory in use. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryUsageName = "process.memory.usage" + ProcessMemoryUsageUnit = "By" + ProcessMemoryUsageDescription = "The amount of physical memory in use." + + // ProcessMemoryVirtual is the metric conforming to the + // "process.memory.virtual" semantic conventions. It represents the amount of + // committed virtual memory. + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + ProcessMemoryVirtualName = "process.memory.virtual" + ProcessMemoryVirtualUnit = "By" + ProcessMemoryVirtualDescription = "The amount of committed virtual memory." + + // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic + // conventions. It represents the disk bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessDiskIoName = "process.disk.io" + ProcessDiskIoUnit = "By" + ProcessDiskIoDescription = "Disk bytes transferred." + + // ProcessNetworkIo is the metric conforming to the "process.network.io" + // semantic conventions. It represents the network bytes transferred. + // Instrument: counter + // Unit: By + // Stability: Experimental + ProcessNetworkIoName = "process.network.io" + ProcessNetworkIoUnit = "By" + ProcessNetworkIoDescription = "Network bytes transferred." + + // ProcessThreadCount is the metric conforming to the "process.thread.count" + // semantic conventions. It represents the process threads count. + // Instrument: updowncounter + // Unit: {thread} + // Stability: Experimental + ProcessThreadCountName = "process.thread.count" + ProcessThreadCountUnit = "{thread}" + ProcessThreadCountDescription = "Process threads count." + + // ProcessOpenFileDescriptorCount is the metric conforming to the + // "process.open_file_descriptor.count" semantic conventions. It represents the + // number of file descriptors in use by the process. + // Instrument: updowncounter + // Unit: {count} + // Stability: Experimental + ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count" + ProcessOpenFileDescriptorCountUnit = "{count}" + ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process." + + // ProcessContextSwitches is the metric conforming to the + // "process.context_switches" semantic conventions. It represents the number of + // times the process has been context switched. + // Instrument: counter + // Unit: {count} + // Stability: Experimental + ProcessContextSwitchesName = "process.context_switches" + ProcessContextSwitchesUnit = "{count}" + ProcessContextSwitchesDescription = "Number of times the process has been context switched." + + // ProcessPagingFaults is the metric conforming to the "process.paging.faults" + // semantic conventions. It represents the number of page faults the process + // has made. + // Instrument: counter + // Unit: {fault} + // Stability: Experimental + ProcessPagingFaultsName = "process.paging.faults" + ProcessPagingFaultsUnit = "{fault}" + ProcessPagingFaultsDescription = "Number of page faults the process has made." // RPCServerDuration is the metric conforming to the "rpc.server.duration" // semantic conventions. It represents the measures the duration of inbound @@ -883,6 +1109,16 @@ const ( SystemMemoryLimitUnit = "By" SystemMemoryLimitDescription = "Total memory available in the system." 
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared" + // semantic conventions. It represents the shared memory used (mostly by + // tmpfs). + // Instrument: updowncounter + // Unit: By + // Stability: Experimental + SystemMemorySharedName = "system.memory.shared" + SystemMemorySharedUnit = "By" + SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)." + // SystemMemoryUtilization is the metric conforming to the // "system.memory.utilization" semantic conventions. // Instrument: gauge @@ -1038,25 +1274,25 @@ const ( SystemNetworkConnectionsName = "system.network.connections" SystemNetworkConnectionsUnit = "{connection}" - // SystemProcessesCount is the metric conforming to the - // "system.processes.count" semantic conventions. It represents the total - // number of processes in each state. + // SystemProcessCount is the metric conforming to the "system.process.count" + // semantic conventions. It represents the total number of processes in each + // state. // Instrument: updowncounter // Unit: {process} // Stability: Experimental - SystemProcessesCountName = "system.processes.count" - SystemProcessesCountUnit = "{process}" - SystemProcessesCountDescription = "Total number of processes in each state" + SystemProcessCountName = "system.process.count" + SystemProcessCountUnit = "{process}" + SystemProcessCountDescription = "Total number of processes in each state" - // SystemProcessesCreated is the metric conforming to the - // "system.processes.created" semantic conventions. It represents the total + // SystemProcessCreated is the metric conforming to the + // "system.process.created" semantic conventions. It represents the total // number of processes created over uptime of the host. // Instrument: counter // Unit: {process} // Stability: Experimental - SystemProcessesCreatedName = "system.processes.created" - SystemProcessesCreatedUnit = "{process}" - SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" + SystemProcessCreatedName = "system.process.created" + SystemProcessCreatedUnit = "{process}" + SystemProcessCreatedDescription = "Total number of processes created over uptime of the host" // SystemLinuxMemoryAvailable is the metric conforming to the // "system.linux.memory.available" semantic conventions. It represents an diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go similarity index 85% rename from vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go index fe80b173..4c87c7ad 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go @@ -1,9 +1,9 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" +package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. 
Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/<version> -const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.26.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go index 5650a174..8c45a710 100644 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -22,7 +22,7 @@ func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Cont return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) } -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly // as a remote SpanContext and as the current Span. The Span implementation // that wraps rsc is non-recording and performs no operations other than to // return rsc as the SpanContext from the SpanContext method. diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index d661c5d1..cdbf41d6 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -96,7 +96,7 @@ can embed the API interface directly. This option is not recommended. It will lead to publishing packages that contain runtime panics when users update to newer versions of -[go.opentelemetry.io/otel/trace], which may be done with a trasitive +[go.opentelemetry.io/otel/trace], which may be done with a transitive dependency. Finally, an author can embed another implementation in theirs. The embedded diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md new file mode 100644 index 00000000..cd382c82 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md @@ -0,0 +1,3 @@ +# Trace Noop + +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop) diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go new file mode 100644 index 00000000..64a4f1b3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package noop provides an implementation of the OpenTelemetry trace API that +// produces no telemetry and minimizes used computation resources. +// +// Using this package to implement the OpenTelemetry trace API will effectively +// disable OpenTelemetry. +// +// This implementation can be embedded in other implementations of the +// OpenTelemetry trace API. Doing so will mean the implementation defaults to +// no operation for methods it does not implement. +package noop // import "go.opentelemetry.io/otel/trace/noop" +
+import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/embedded" +) + +var ( + // Compile-time check this implements the OpenTelemetry API. + + _ trace.TracerProvider = TracerProvider{} + _ trace.Tracer = Tracer{} + _ trace.Span = Span{} +) + +// TracerProvider is an OpenTelemetry No-Op TracerProvider. +type TracerProvider struct{ embedded.TracerProvider } + +// NewTracerProvider returns a TracerProvider that does not record any telemetry.
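//
// A minimal sketch of disabling tracing with this package (the tracer name
// "app" and span name "ignored" are placeholders):
//
//	tp := noop.NewTracerProvider()
//	tracer := tp.Tracer("app")
//	_, span := tracer.Start(context.Background(), "ignored")
//	defer span.End()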
+func NewTracerProvider() TracerProvider { + return TracerProvider{} +} + +// Tracer returns an OpenTelemetry Tracer that does not record any telemetry. +func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { + return Tracer{} +} + +// Tracer is an OpenTelemetry No-Op Tracer. +type Tracer struct{ embedded.Tracer } + +// Start creates a span. The created span will be set in a child context of ctx +// and returned with the span. +// +// If ctx contains a span context, the returned span will also contain that +// span context. If the span context in ctx is for a non-recording span, that +// span instance will be returned directly. +func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) { + span := trace.SpanFromContext(ctx) + + // If the parent context contains a non-zero span context, that span + // context needs to be returned as a non-recording span + // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk). + var zeroSC trace.SpanContext + if sc := span.SpanContext(); !sc.Equal(zeroSC) { + if !span.IsRecording() { + // If the span is not recording return it directly. + return ctx, span + } + // Otherwise, wrap the span context in a non-recording span. + span = Span{sc: sc} + } else { + // No parent, return a No-Op span with an empty span context. + span = noopSpanInstance + } + return trace.ContextWithSpan(ctx, span), span +} + +var noopSpanInstance trace.Span = Span{} + +// Span is an OpenTelemetry No-Op Span. +type Span struct { + embedded.Span + + sc trace.SpanContext +} + +// SpanContext returns the SpanContext of the Span. +func (s Span) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (Span) IsRecording() bool { return false } + +// SetStatus does nothing. +func (Span) SetStatus(codes.Code, string) {} + +// SetAttributes does nothing. +func (Span) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (Span) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (Span) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (Span) AddEvent(string, ...trace.EventOption) {} + +// AddLink does nothing. +func (Span) AddLink(trace.Link) {} + +// SetName does nothing. +func (Span) SetName(string) {} + +// TracerProvider returns a No-Op TracerProvider. +func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 00000000..ef85cb70 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation.
Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 00000000..d3aa476e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. 
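//
// A common error-handling sketch against this interface (hedged: "span" is
// assumed to come from Tracer.Start and "work" is a placeholder; note that
// RecordError alone does not change the span status):
//
//	if err := work(ctx); err != nil {
//		span.RecordError(err)
//		span.SetStatus(codes.Error, err.Error())
//	}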
+type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. + SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. 
+func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4a..d49adf67 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. - // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. 
If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. 
This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. 
The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 00000000..77952d2a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. 
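+	//
+	// A minimal usage sketch (illustrative only, not part of the upstream
+	// comment; tracer is any Tracer implementation):
+	//
+	//	ctx, span := tracer.Start(ctx, "operation")
+	//	defer span.End()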
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf24..dc5e34ca 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57f..00000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) 
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 00000000..c9b7cdbb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Checkout the previous version on the base branch of the changelog to tmpfolder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+    echo "Error: The released sections of the changelog file have been modified."
+    diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+    rm -rf "$TEMP_DIR"
+    false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index ab289605..6d3c7b1f 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
func Version() string { - return "1.28.0" + return "1.31.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 241cfc82..cdebdb5e 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.28.0 + version: v1.31.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,21 +29,21 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.50.0 + version: v0.53.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.4.0 + version: v0.7.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.8 + version: v0.0.10 modules: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/go.opentelemetry.io/proto/otlp/LICENSE b/vendor/go.opentelemetry.io/proto/otlp/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go new file mode 100644 index 00000000..c1af04e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go @@ -0,0 +1,367 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+	v1 "go.opentelemetry.io/proto/otlp/trace/v1"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportTraceServiceRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An array of ResourceSpans.
+	// For data coming from a single resource this array will typically contain one
+	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further and
+	// in that case this array will contain multiple elements.
+	ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *ExportTraceServiceRequest) Reset() {
+	*x = ExportTraceServiceRequest{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+
+func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
+	if x != nil {
+		return x.ResourceSpans
+	}
+	return nil
+}
+
+type ExportTraceServiceResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The details of a partially successful export request.
+	//
+	// If the request is only partially accepted
+	// (i.e. when the server accepts only parts of the data and rejects the rest)
+	// the server MUST initialize the `partial_success` field and MUST
+	// set the `rejected_<signal>` with the number of items it rejected.
+	//
+	// Servers MAY also make use of the `partial_success` field to convey
+	// warnings/suggestions to senders even when the request was fully accepted.
+	// In such cases, the `rejected_<signal>` MUST have a value of `0` and
+	// the `error_message` MUST be non-empty.
+	//
+	// A `partial_success` message with an empty value (rejected_<signal> = 0 and
+	// `error_message` = "") is equivalent to it not being set/present. Senders
+	// SHOULD interpret it the same way as in the full success case.
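+	//
+	// For example (an illustrative sketch, not part of the upstream proto
+	// comment; resp is an *ExportTraceServiceResponse and log is the standard
+	// library logger), a client could react to a partial success like this:
+	//
+	//	if ps := resp.GetPartialSuccess(); ps != nil && ps.GetRejectedSpans() > 0 {
+	//		log.Printf("collector rejected %d spans: %s", ps.GetRejectedSpans(), ps.GetErrorMessage())
+	//	}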
+	PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+	*x = ExportTraceServiceResponse{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+	return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess {
+	if x != nil {
+		return x.PartialSuccess
+	}
+	return nil
+}
+
+type ExportTracePartialSuccess struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The number of rejected spans.
+	//
+	// A `rejected_<signal>` field holding a `0` value indicates that the
+	// request was fully accepted.
+	RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
+	// A developer-facing human-readable message in English. It should be used
+	// either to explain why the server rejected parts of the data during a partial
+	// success or to convey warnings/suggestions during a full success. The message
+	// should offer guidance on how users can address such issues.
+	//
+	// error_message is an optional field. An error_message with an empty value
+	// is equivalent to it not being set.
+	ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportTracePartialSuccess) Reset() {
+	*x = ExportTracePartialSuccess{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExportTracePartialSuccess) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTracePartialSuccess) ProtoMessage() {}
+
+func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message {
+	mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 { + if x != nil { + return x.RejectedSpans + } + return 0 +} + +func (x *ExportTracePartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f, + 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, + 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, + 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, + 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc +) + +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData) + }) + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData +} + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{ + (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + (*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans +} +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = 
[]int32{ + 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() } +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { + if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTracePartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes, + }.Build() + File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go new file mode 100644 index 00000000..bb1bd261 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. 
+ +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". +// UnaryRPC :call TraceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead. 
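+//
+// A minimal registration sketch (illustrative only, not part of the generated
+// comment; ctx and server, a TraceServiceServer implementation, are assumed to
+// exist in the caller):
+//
+//	mux := runtime.NewServeMux()
+//	if err := RegisterTraceServiceHandlerServer(ctx, mux, server); err != nil {
+//		// handle the registration error
+//	}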
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, "")) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go new file mode 100644 index 00000000..dd1b73f1 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -0,0 +1,109 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.21.6 +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TraceServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) +} + +type traceServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { + out := new(ExportTraceServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. 
+// All implementations must embed UnimplementedTraceServiceServer +// for forward compatibility +type TraceServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) + mustEmbedUnimplementedTraceServiceServer() +} + +// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations. +type UnimplementedTraceServiceServer struct { +} + +func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} +func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {} + +// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TraceServiceServer will +// result in compilation errors. +type UnsafeTraceServiceServer interface { + mustEmbedUnimplementedTraceServiceServer() +} + +func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) { + s.RegisterService(&TraceService_ServiceDesc, srv) +} + +func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportTraceServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TraceService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _TraceService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go new file mode 100644 index 00000000..852209b0 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -0,0 +1,630 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/common/v1/common.proto + +package v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AnyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + // + // Types that are assignable to Value: + // *AnyValue_StringValue + // *AnyValue_BoolValue + // *AnyValue_IntValue + // *AnyValue_DoubleValue + // *AnyValue_ArrayValue + // *AnyValue_KvlistValue + // *AnyValue_BytesValue + Value isAnyValue_Value `protobuf_oneof:"value"` +} + +func (x *AnyValue) Reset() { + *x = AnyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyValue) ProtoMessage() {} + +func (x *AnyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead. 
+func (*AnyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0} +} + +func (m *AnyValue) GetValue() isAnyValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *AnyValue) GetStringValue() string { + if x, ok := x.GetValue().(*AnyValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *AnyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*AnyValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *AnyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*AnyValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (x *AnyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *AnyValue) GetArrayValue() *ArrayValue { + if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (x *AnyValue) GetKvlistValue() *KeyValueList { + if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok { + return x.KvlistValue + } + return nil +} + +func (x *AnyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*AnyValue_BytesValue); ok { + return x.BytesValue + } + return nil +} + +type isAnyValue_Value interface { + isAnyValue_Value() +} + +type AnyValue_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AnyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type AnyValue_IntValue struct { + IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AnyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type AnyValue_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"` +} + +type AnyValue_KvlistValue struct { + KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"` +} + +type AnyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*AnyValue_StringValue) isAnyValue_Value() {} + +func (*AnyValue_BoolValue) isAnyValue_Value() {} + +func (*AnyValue_IntValue) isAnyValue_Value() {} + +func (*AnyValue_DoubleValue) isAnyValue_Value() {} + +func (*AnyValue_ArrayValue) isAnyValue_Value() {} + +func (*AnyValue_KvlistValue) isAnyValue_Value() {} + +func (*AnyValue_BytesValue) isAnyValue_Value() {} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Array of values. The array may be empty (contain 0 elements). 
+ Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ArrayValue) Reset() { + *x = ArrayValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ArrayValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArrayValue) ProtoMessage() {} + +func (x *ArrayValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead. +func (*ArrayValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1} +} + +func (x *ArrayValue) GetValues() []*AnyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +type KeyValueList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *KeyValueList) Reset() { + *x = KeyValueList{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueList) ProtoMessage() {} + +func (x *KeyValueList) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead. +func (*KeyValueList) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2} +} + +func (x *KeyValueList) GetValues() []*KeyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. 
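+//
+// As a hedged usage sketch (the getters below are defined in this file;
+// the key and value are made up):
+//
+//	kv := &KeyValue{
+//		Key:   "http.method",
+//		Value: &AnyValue{Value: &AnyValue_StringValue{StringValue: "GET"}},
+//	}
+//	_ = kv.GetValue().GetStringValue() // yields "GET"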
+type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() *AnyValue { + if x != nil { + return x.Value + } + return nil +} + +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +type InstrumentationScope struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An empty instrumentation scope name means the name is unknown. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Additional attributes that describe the scope. [Optional]. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *InstrumentationScope) Reset() { + *x = InstrumentationScope{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InstrumentationScope) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstrumentationScope) ProtoMessage() {} + +func (x *InstrumentationScope) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead. 
+func (*InstrumentationScope) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} +} + +func (x *InstrumentationScope) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InstrumentationScope) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *InstrumentationScope) GetAttributes() []*KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08, + 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, + 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, + 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, + 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, + 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b, + 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once + 
file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc +) + +func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData) + }) + return file_opentelemetry_proto_common_v1_common_proto_rawDescData +} + +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ + (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue + (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue + (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList + (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue + (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope +} +var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue + 2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList + 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue + 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue + 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue + 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_common_v1_common_proto_init() } +func file_opentelemetry_proto_common_v1_common_proto_init() { + if File_opentelemetry_proto_common_v1_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ArrayValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*InstrumentationScope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AnyValue_StringValue)(nil), + (*AnyValue_BoolValue)(nil), + (*AnyValue_IntValue)(nil), + (*AnyValue_DoubleValue)(nil), + (*AnyValue_ArrayValue)(nil), + (*AnyValue_KvlistValue)(nil), + (*AnyValue_BytesValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_common_v1_common_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_common_v1_common_proto_msgTypes, + }.Build() + File_opentelemetry_proto_common_v1_common_proto = out.File + file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil + file_opentelemetry_proto_common_v1_common_proto_goTypes = nil + file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go new file mode 100644 index 00000000..b7545b03 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -0,0 +1,193 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + v1 "go.opentelemetry.io/proto/otlp/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Resource information. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. 
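+	// As an illustrative sketch (v1 is the common/v1 import above; the
+	// values are made up), a producer that discarded two oversized
+	// attributes would encode:
+	//
+	//	res := &Resource{
+	//		Attributes: []*v1.KeyValue{{
+	//			Key:   "service.name",
+	//			Value: &v1.AnyValue{Value: &v1.AnyValue_StringValue{StringValue: "checkout"}},
+	//		}},
+	//		DroppedAttributesCount: 2,
+	//	}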
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *Resource) GetAttributes() []*v1.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Resource) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, + 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 
0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc +) + +func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData) + }) + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData +} + +var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } +func file_opentelemetry_proto_resource_v1_resource_proto_init() { + if File_opentelemetry_proto_resource_v1_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_resource_v1_resource_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_resource_v1_resource_proto_msgTypes, + }.Build() + File_opentelemetry_proto_resource_v1_resource_proto = out.File + file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil + file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil + file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go new file mode 100644 index 00000000..d7099c35 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -0,0 +1,1281 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 
2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.6 +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + v11 "go.opentelemetry.io/proto/otlp/common/v1" + v1 "go.opentelemetry.io/proto/otlp/resource/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // The zero value for the enum. Should not be used for comparisons. + // Instead use bitwise "and" with the appropriate mask as shown above. + SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0 + // Bits 0-7 are used for trace flags. + SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256 + SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512 +) + +// Enum value maps for SpanFlags. 
+var ( + SpanFlags_name = map[int32]string{ + 0: "SPAN_FLAGS_DO_NOT_USE", + 255: "SPAN_FLAGS_TRACE_FLAGS_MASK", + 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK", + 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK", + } + SpanFlags_value = map[string]int32{ + "SPAN_FLAGS_DO_NOT_USE": 0, + "SPAN_FLAGS_TRACE_FLAGS_MASK": 255, + "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256, + "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512, + } +) + +func (x SpanFlags) Enum() *SpanFlags { + p := new(SpanFlags) + *p = x + return p +} + +func (x SpanFlags) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SpanFlags) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor() +} + +func (SpanFlags) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0] +} + +func (x SpanFlags) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SpanFlags.Descriptor instead. +func (SpanFlags) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +// Enum value maps for Span_SpanKind. 
+var ( + Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", + } + Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, + } +) + +func (x Span_SpanKind) Enum() *Span_SpanKind { + p := new(Span_SpanKind) + *p = x + return p +} + +func (x Span_SpanKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor() +} + +func (Span_SpanKind) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1] +} + +func (x Span_SpanKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Span_SpanKind.Descriptor instead. +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +// Enum value maps for Status_StatusCode. +var ( + Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", + } + Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, + } +) + +func (x Status_StatusCode) Enum() *Status_StatusCode { + p := new(Status_StatusCode) + *p = x + return p +} + +func (x Status_StatusCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor() +} + +func (Status_StatusCode) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2] +} + +func (x Status_StatusCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status_StatusCode.Descriptor instead. +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} +} + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type TracesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceSpans. 
+ // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (x *TracesData) Reset() { + *x = TracesData{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracesData) ProtoMessage() {} + +func (x *TracesData) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracesData.ProtoReflect.Descriptor instead. +func (*TracesData) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +func (x *TracesData) GetResourceSpans() []*ResourceSpans { + if x != nil { + return x.ResourceSpans + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the resource data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ResourceSpans) Reset() { + *x = ResourceSpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSpans) ProtoMessage() {} + +func (x *ResourceSpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead. 
+func (*ResourceSpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceSpans) GetResource() *v1.Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans { + if x != nil { + return x.ScopeSpans + } + return nil +} + +func (x *ResourceSpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ScopeSpans) Reset() { + *x = ScopeSpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ScopeSpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScopeSpans) ProtoMessage() {} + +func (x *ScopeSpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead. +func (*ScopeSpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} +} + +func (x *ScopeSpans) GetScope() *v11.InstrumentationScope { + if x != nil { + return x.Scope + } + return nil +} + +func (x *ScopeSpans) GetSpans() []*Span { + if x != nil { + return x.Spans + } + return nil +} + +func (x *ScopeSpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A Span represents a single operation performed by a single component of the system. +// +// The next available field id is 17. +type Span struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. 
An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. 
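+	//
+	// As an illustrative sketch (the time package is not imported by this
+	// generated file; hypothetical caller code):
+	//
+	//	span.StartTimeUnixNano = uint64(time.Now().UnixNano())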
+ // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). 
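+	// A minimal reading sketch under that rule (span is a hypothetical
+	// *Span; GetStatus and GetCode are defined in this file):
+	//
+	//	code := Status_STATUS_CODE_UNSET
+	//	if s := span.GetStatus(); s != nil {
+	//		code = s.GetCode()
+	//	}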
+ Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *Span) Reset() { + *x = Span{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span.ProtoReflect.Descriptor instead. +func (*Span) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} +} + +func (x *Span) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span) GetParentSpanId() []byte { + if x != nil { + return x.ParentSpanId + } + return nil +} + +func (x *Span) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +func (x *Span) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span) GetKind() Span_SpanKind { + if x != nil { + return x.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (x *Span) GetStartTimeUnixNano() uint64 { + if x != nil { + return x.StartTimeUnixNano + } + return 0 +} + +func (x *Span) GetEndTimeUnixNano() uint64 { + if x != nil { + return x.EndTimeUnixNano + } + return 0 +} + +func (x *Span) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *Span) GetEvents() []*Span_Event { + if x != nil { + return x.Events + } + return nil +} + +func (x *Span) GetDroppedEventsCount() uint32 { + if x != nil { + return x.DroppedEventsCount + } + return 0 +} + +func (x *Span) GetLinks() []*Span_Link { + if x != nil { + return x.Links + } + return nil +} + +func (x *Span) GetDroppedLinksCount() uint32 { + if x != nil { + return x.DroppedLinksCount + } + return 0 +} + +func (x *Span) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. 
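+	// For example (an illustrative sketch; the message text is made up):
+	//
+	//	st := &Status{Code: Status_STATUS_CODE_ERROR, Message: "upstream timeout"}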
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4} +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetCode() Status_StatusCode { + if x != nil { + return x.Code + } + return Status_STATUS_CODE_UNSET +} + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type Span_Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Span_Event) Reset() { + *x = Span_Event{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Event) ProtoMessage() {} + +func (x *Span_Event) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead. 
+func (*Span_Event) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *Span_Event) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *Span_Event) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span_Event) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Event) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. 
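+	//
+	// A sketch of the mask reads described above (link is a hypothetical
+	// *Span_Link; the SpanFlags constants are defined in this file):
+	//
+	//	flags := link.GetFlags()
+	//	traceFlags := flags & uint32(SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK)
+	//	hasIsRemote := flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
+	//	isRemote := flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0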
+ Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (x *Span_Link) Reset() { + *x = Span_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Link) ProtoMessage() {} + +func (x *Span_Link) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead. +func (*Span_Link) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *Span_Link) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span_Link) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span_Link) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span_Link) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Link) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *Span_Link) GetFlags() uint32 { + if x != nil { + return x.Flags + } + return 0 +} + +var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 
0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x49, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, + 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, + 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, + 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x70, 0x61, 0x6e, 0x2e, 
0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, + 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, + 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, + 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, + 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, + 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, + 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, + 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, + 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, + 0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, + 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, + 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, + 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, + 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, + 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, + 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, + 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 
0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, + 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, + 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, + 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, + 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a, + 0x9c, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a, + 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e, + 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e, + 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41, + 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x53, 0x50, + 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, + 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x49, 0x53, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, + 0x41, 0x53, 0x4b, 0x10, 0x80, 0x02, 0x12, 0x26, 0x0a, 0x21, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, + 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x49, 0x53, 0x5f, + 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x80, 0x04, 0x42, 0x77, + 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc +) + +func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData) + }) + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData +} + +var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var 
file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ + (SpanFlags)(0), // 0: opentelemetry.proto.trace.v1.SpanFlags + (Span_SpanKind)(0), // 1: opentelemetry.proto.trace.v1.Span.SpanKind + (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode + (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans + (*ScopeSpans)(nil), // 5: opentelemetry.proto.trace.v1.ScopeSpans + (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope + (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ + 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans + 11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 6, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 1, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind + 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event + 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link + 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status + 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode + 12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() } +func file_opentelemetry_proto_trace_v1_trace_proto_init() { + if File_opentelemetry_proto_trace_v1_trace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceSpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ScopeSpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc, + NumEnums: 3, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_trace_v1_trace_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs, + EnumInfos: file_opentelemetry_proto_trace_v1_trace_proto_enumTypes, + MessageInfos: file_opentelemetry_proto_trace_v1_trace_proto_msgTypes, + }.Build() + File_opentelemetry_proto_trace_v1_trace_proto = out.File + file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil + file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil + file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 6713acca..c3895478 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -1,243 +1,2791 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT. 
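+// The unrolled instruction stream below corresponds to the BLAMKA_ROUND_0 and
+// BLAMKA_ROUND_1 macro expansions of the previous hand-written version: each
+// block of instructions runs HALF_ROUND, SHUFFLE, HALF_ROUND, SHUFFLE_INV over
+// registers X0-X7, first across the eight 128-byte rows of the 1 KiB Argon2
+// block and then across its columns.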
//go:build amd64 && gc && !purego #include "textflag.h" -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 
t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - // func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), 
X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + MOVOU 352(AX), X6 + MOVOU 368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 
+ PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO 
X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + 
PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + 
PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + 
PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ 
X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 
288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + 
PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ 
X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + 
PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 
+ PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) + MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) + RET - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) - RET +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - 
MOVOU 0(AX), X0
-	MOVOU 0(BX), X1
-	MOVOU 0(CX), X2
+	MOVOU (AX), X0
+	MOVOU (BX), X1
+	MOVOU (CX), X2
	PXOR X1, X0
	PXOR X2, X0
-	MOVOU X0, 0(DX)
-	ADDQ $16, AX
-	ADDQ $16, BX
-	ADDQ $16, CX
-	ADDQ $16, DX
-	SUBQ $2, DI
+	MOVOU X0, (DX)
+	ADDQ $0x10, AX
+	ADDQ $0x10, BX
+	ADDQ $0x10, CX
+	ADDQ $0x10, DX
+	SUBQ $0x02, DI
	JA loop
	RET

-// func xorBlocksSSE2(out, a, b, c *block)
-TEXT ·xorBlocksSSE2(SB), 4, $0-32
+// func xorBlocksSSE2(out *block, a *block, b *block, c *block)
+// Requires: SSE2
+TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32
	MOVQ out+0(FP), DX
	MOVQ a+8(FP), AX
	MOVQ b+16(FP), BX
	MOVQ c+24(FP), CX
-	MOVQ $128, DI
+	MOVQ $0x00000080, DI

loop:
-	MOVOU 0(AX), X0
-	MOVOU 0(BX), X1
-	MOVOU 0(CX), X2
-	MOVOU 0(DX), X3
+	MOVOU (AX), X0
+	MOVOU (BX), X1
+	MOVOU (CX), X2
+	MOVOU (DX), X3
	PXOR X1, X0
	PXOR X2, X0
	PXOR X3, X0
-	MOVOU X0, 0(DX)
-	ADDQ $16, AX
-	ADDQ $16, BX
-	ADDQ $16, CX
-	ADDQ $16, DX
-	SUBQ $2, DI
+	MOVOU X0, (DX)
+	ADDQ $0x10, AX
+	ADDQ $0x10, BX
+	ADDQ $0x10, CX
+	ADDQ $0x10, DX
+	SUBQ $0x02, DI
	JA loop
	RET
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
index 9ae8206c..f75162e0 100644
--- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
+++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s
@@ -1,722 +1,4517 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT.

//go:build amd64 && gc && !purego

#include "textflag.h"

-DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
-DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
-DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
-
-DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
-
-#define VPERMQ_0x39_Y1_Y1 BYTE
$0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - 
VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - 
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - // func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) + VMOVDQU ·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 JGE noinc INCQ R9 MOVQ R9, 8(DX) noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE 
$0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE 
$0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + 
VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE 
$0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + 
BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + 
BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE 
$0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + VPSHUFD $0x4e, 40(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 32(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 64(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE 
$0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 96(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 128(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 160(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 192(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 224(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 256(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VMOVDQU Y8, (AX) + VMOVDQU Y9, 32(AX) VZEROUPPER - RET -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - 
VMOVDQA X4, X14; \
-	VPUNPCKLQDQ_X2_X2_X15; \
-	VMOVDQA X5, X4; \
-	VPUNPCKHQDQ_X15_X3_X2; \
-	VMOVDQA X14, X5; \
-	VPUNPCKLQDQ_X3_X3_X15; \
-	VMOVDQA X6, X14; \
-	VPUNPCKHQDQ_X15_X13_X3; \
-	VPUNPCKLQDQ_X7_X7_X15; \
-	VPUNPCKHQDQ_X15_X6_X6; \
-	VPUNPCKLQDQ_X14_X14_X15; \
-	VPUNPCKHQDQ_X15_X7_X7; \
-
-#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
-	VPADDQ m0, v0, v0; \
-	VPADDQ v2, v0, v0; \
-	VPADDQ m1, v1, v1; \
-	VPADDQ v3, v1, v1; \
-	VPXOR v0, v6, v6; \
-	VPXOR v1, v7, v7; \
-	VPSHUFD $-79, v6, v6; \
-	VPSHUFD $-79, v7, v7; \
-	VPADDQ v6, v4, v4; \
-	VPADDQ v7, v5, v5; \
-	VPXOR v4, v2, v2; \
-	VPXOR v5, v3, v3; \
-	VPSHUFB c40, v2, v2; \
-	VPSHUFB c40, v3, v3; \
-	VPADDQ m2, v0, v0; \
-	VPADDQ v2, v0, v0; \
-	VPADDQ m3, v1, v1; \
-	VPADDQ v3, v1, v1; \
-	VPXOR v0, v6, v6; \
-	VPXOR v1, v7, v7; \
-	VPSHUFB c48, v6, v6; \
-	VPSHUFB c48, v7, v7; \
-	VPADDQ v6, v4, v4; \
-	VPADDQ v7, v5, v5; \
-	VPXOR v4, v2, v2; \
-	VPXOR v5, v3, v3; \
-	VPADDQ v2, v2, t0; \
-	VPSRLQ $63, v2, v2; \
-	VPXOR t0, v2, v2; \
-	VPADDQ v3, v3, t0; \
-	VPSRLQ $63, v3, v3; \
-	VPXOR t0, v3, v3
-
-// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
-// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
-#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
-	VMOVQ_SI_X12(i0*8); \
-	VMOVQ_SI_X13(i2*8); \
-	VMOVQ_SI_X14(i4*8); \
-	VMOVQ_SI_X15(i6*8); \
-	VPINSRQ_1_SI_X12(i1*8); \
-	VPINSRQ_1_SI_X13(i3*8); \
-	VPINSRQ_1_SI_X14(i5*8); \
-	VPINSRQ_1_SI_X15(i7*8)
-
-// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
-#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
-	VMOVQ_SI_X12_0; \
-	VMOVQ_SI_X13(4*8); \
-	VMOVQ_SI_X14(1*8); \
-	VMOVQ_SI_X15(5*8); \
-	VPINSRQ_1_SI_X12(2*8); \
-	VPINSRQ_1_SI_X13(6*8); \
-	VPINSRQ_1_SI_X14(3*8); \
-	VPINSRQ_1_SI_X15(7*8)
-
-// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
-#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
-	VPSHUFD $0x4E, 0*8(SI), X12; \
-	VMOVQ_SI_X13(11*8); \
-	VMOVQ_SI_X14(12*8); \
-	VMOVQ_SI_X15(7*8); \
-	VPINSRQ_1_SI_X13(5*8); \
-	VPINSRQ_1_SI_X14(2*8); \
-	VPINSRQ_1_SI_X15(3*8)
-
-// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
-#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
-	VMOVDQU 11*8(SI), X12; \
-	VMOVQ_SI_X13(5*8); \
-	VMOVQ_SI_X14(8*8); \
-	VMOVQ_SI_X15(2*8); \
-	VPINSRQ_1_SI_X13(15*8); \
-	VPINSRQ_1_SI_X14_0; \
-	VPINSRQ_1_SI_X15(13*8)
-
-// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
-#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
-	VMOVQ_SI_X12(2*8); \
-	VMOVQ_SI_X13(4*8); \
-	VMOVQ_SI_X14(6*8); \
-	VMOVQ_SI_X15_0; \
-	VPINSRQ_1_SI_X12(5*8); \
-	VPINSRQ_1_SI_X13(15*8); \
-	VPINSRQ_1_SI_X14(10*8); \
-	VPINSRQ_1_SI_X15(8*8)
+DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32
-// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
-#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
-	VMOVQ_SI_X12(9*8); \
-	VMOVQ_SI_X13(2*8); \
-	VMOVQ_SI_X14_0; \
-	VMOVQ_SI_X15(4*8); \
-	VPINSRQ_1_SI_X12(5*8); \
-	VPINSRQ_1_SI_X13(10*8); \
-	VPINSRQ_1_SI_X14(7*8); \
-	VPINSRQ_1_SI_X15(15*8)
+DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32
-// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
-#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
-	VMOVQ_SI_X12(2*8); \
-	VMOVQ_SI_X13_0; \
-	VMOVQ_SI_X14(12*8); \
-	VMOVQ_SI_X15(11*8); \
-	VPINSRQ_1_SI_X12(6*8); \
-	VPINSRQ_1_SI_X13(8*8); \
-	VPINSRQ_1_SI_X14(10*8); \
-	VPINSRQ_1_SI_X15(3*8)
+DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32
-// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
-#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
-	MOVQ 0*8(SI), X12; \
-	VPSHUFD $0x4E, 8*8(SI), X13; \
-	MOVQ 7*8(SI), X14; \
-	MOVQ 2*8(SI), X15; \
-	VPINSRQ_1_SI_X12(6*8); \
-	VPINSRQ_1_SI_X14(3*8); \
-	VPINSRQ_1_SI_X15(11*8)
-
-// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
-#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
-	MOVQ 6*8(SI), X12; \
-	MOVQ 11*8(SI), X13; \
-	MOVQ 15*8(SI), X14; \
-	MOVQ 3*8(SI), X15; \
-	VPINSRQ_1_SI_X12(14*8); \
-	VPINSRQ_1_SI_X13_0; \
-	VPINSRQ_1_SI_X14(9*8); \
-	VPINSRQ_1_SI_X15(8*8)
-
-// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
-#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
-	MOVQ 5*8(SI), X12; \
-	MOVQ 8*8(SI), X13; \
-	MOVQ 0*8(SI), X14; \
-	MOVQ 6*8(SI), X15; \
-	VPINSRQ_1_SI_X12(15*8); \
-	VPINSRQ_1_SI_X13(2*8); \
-	VPINSRQ_1_SI_X14(4*8); \
-	VPINSRQ_1_SI_X15(10*8)
-
-// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
-#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
-	VMOVDQU 12*8(SI), X12; \
-	MOVQ 1*8(SI), X13; \
-	MOVQ 2*8(SI), X14; \
-	VPINSRQ_1_SI_X13(10*8); \
-	VPINSRQ_1_SI_X14(7*8); \
-	VMOVDQU 4*8(SI), X15
-
-// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
-#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
-	MOVQ 15*8(SI), X12; \
-	MOVQ 3*8(SI), X13; \
-	MOVQ 11*8(SI), X14; \
-	MOVQ 12*8(SI), X15; \
-	VPINSRQ_1_SI_X12(9*8); \
-	VPINSRQ_1_SI_X13(13*8); \
-	VPINSRQ_1_SI_X14(14*8); \
-	VPINSRQ_1_SI_X15_0
+DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1
+DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f
+DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32

 // func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
-	MOVQ h+0(FP), AX
-	MOVQ c+8(FP), BX
-	MOVQ flag+16(FP), CX
-	MOVQ blocks_base+24(FP), SI
-	MOVQ blocks_len+32(FP), DI
-
-	MOVQ SP, R10
-	ADDQ $15, R10
-	ANDQ $~15, R10
-
-	VMOVDQU ·AVX_c40<>(SB), X0
-	VMOVDQU ·AVX_c48<>(SB), X1
+// Requires: AVX, SSE2
+TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48
+	MOVQ h+0(FP), AX
+	MOVQ c+8(FP), BX
+	MOVQ flag+16(FP), CX
+	MOVQ blocks_base+24(FP), SI
+	MOVQ blocks_len+32(FP), DI
+	MOVQ SP, R10
+	ADDQ $0x0f, R10
+	ANDQ $-16, R10
+	VMOVDQU ·AVX_c40<>+0(SB), X0
+	VMOVDQU ·AVX_c48<>+0(SB), X1
 	VMOVDQA X0, X8
 	VMOVDQA X1, X9
-
-	VMOVDQU ·AVX_iv3<>(SB), X0
-	VMOVDQA X0, 0(R10)
-	XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0)
-
-	VMOVDQU 0(AX), X10
+	VMOVDQU ·AVX_iv3<>+0(SB), X0
+	VMOVDQA X0, (R10)
+	XORQ CX, (R10)
+	VMOVDQU (AX), X10
 	VMOVDQU 16(AX), X11
 	VMOVDQU 32(AX), X2
 	VMOVDQU 48(AX), X3
-
-	MOVQ 0(BX), R8
-	MOVQ 8(BX), R9
+	MOVQ (BX), R8
+	MOVQ 8(BX), R9

 loop:
-	ADDQ $128, R8
-	CMPQ R8, $128
+	ADDQ $0x80, R8
+	CMPQ R8, $0x80
 	JGE noinc
 	INCQ R9

 noinc:
-	VMOVQ_R8_X15
-	VPINSRQ_1_R9_X15
-
+	BYTE $0xc4
+	BYTE $0x41
+	BYTE $0xf9
+	BYTE $0x6e
+	BYTE $0xf8
+	BYTE $0xc4
+	BYTE $0x43
+	BYTE $0x81
+	BYTE $0x22
+
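The HALF_ROUND_AVX macro removed above, and the long VPADDQ/VPXOR/VPSHUFB runs that replace it in the generated file, are two side-by-side copies of BLAKE2b's G mixing step. As a reading aid, here is a scalar Go reference for one G invocation (an illustrative sketch, not part of this patch; the function name g and its signature are mine). The four rotation amounts map directly onto the vector code: VPSHUFD $-79 is the rotate by 32, VPSHUFB with the c40 and c48 masks are the rotates by 24 and 16, and the VPADDQ/VPSRLQ $63/VPXOR triple is the rotate by 63.

	package blake2bref

	import "math/bits"

	// g mixes message words m0 and m1 into one column or diagonal
	// (a, b, c, d) of the BLAKE2b state; this is the dataflow one
	// lane of HALF_ROUND_AVX computes.
	func g(a, b, c, d, m0, m1 uint64) (uint64, uint64, uint64, uint64) {
		a += b + m0
		d = bits.RotateLeft64(d^a, -32) // VPSHUFD $-79
		c += d
		b = bits.RotateLeft64(b^c, -24) // VPSHUFB c40
		a += b + m1
		d = bits.RotateLeft64(d^a, -16) // VPSHUFB c48
		c += d
		b = bits.RotateLeft64(b^c, -63) // VPADDQ + VPSRLQ $63 + VPXOR
		return a, b, c, d
	}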
BYTE $0xf9 + BYTE $0x01 VMOVDQA X10, X0 VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - + VMOVDQU ·AVX_iv0<>+0(SB), X4 + VMOVDQU ·AVX_iv1<>+0(SB), X5 + VMOVDQU ·AVX_iv2<>+0(SB), X6 VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA (R10), X7 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 VMOVDQA X12, 16(R10) VMOVDQA X13, 32(R10) VMOVDQA X14, 48(R10) VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 VMOVDQA X12, 80(R10) VMOVDQA X13, 96(R10) VMOVDQA X14, 112(R10) VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + 
VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 VMOVDQA X12, 144(R10) VMOVDQA X13, 160(R10) VMOVDQA X14, 176(R10) VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPSHUFD $0x4e, (SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 VMOVDQA X12, 208(R10) VMOVDQA X13, 224(R10) 
VMOVDQA X14, 240(R10) VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + 
BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 88(SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x36 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, 
X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 
+ VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + 
VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + 
VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ (SI), X12 + VPSHUFD $0x4e, 64(SI), X13 + MOVQ 56(SI), X14 + MOVQ 16(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, 
X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 40(SI), X12 + MOVQ 64(SI), X13 + MOVQ (SI), X14 + MOVQ 48(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + 
VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + MOVQ 48(SI), X12 + MOVQ 88(SI), X13 + MOVQ 120(SI), X14 + MOVQ 24(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VMOVDQU 96(SI), X12 + MOVQ 8(SI), X13 + MOVQ 16(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + VMOVDQU 32(SI), X15 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + 
BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 120(SI), X12 + MOVQ 24(SI), X13 + MOVQ 88(SI), X14 + MOVQ 96(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x3e + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + 
BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 16(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 32(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 48(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 64(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 80(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 96(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 112(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 128(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 144(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 160(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 176(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 192(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + 
VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 208(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 224(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 240(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 256(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff VMOVDQU 32(AX), X14 VMOVDQU 48(AX), X15 VPXOR X0, X10, X10 @@ -729,16 +4524,36 @@ noinc: VPXOR X7, X15, X3 VMOVDQU X2, 32(AX) VMOVDQU X3, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + VMOVDQU X10, (AX) + VMOVDQU X11, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VZEROUPPER + RET - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop +DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16 - VMOVDQU X10, 0(AX) - VMOVDQU X11, 16(AX) +DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16 - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - VZEROUPPER +DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16 - RET +DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index adfac00c..9a0ce212 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -1,278 +1,1441 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
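The AVX_c40 and AVX_c48 tables defined just above (and their 32-byte AVX2_* twins earlier in the file) are byte-shuffle masks rather than opaque constants: rotating a 64-bit lane right by 24 or 16 bits moves whole bytes only, so a single VPSHUFB per lane performs the rotation. A small self-contained Go check of that equivalence (illustrative only; shuffle64 and the unpacked index arrays are mine, derived from the little-endian bytes of the DATA values):

	package main

	import (
		"encoding/binary"
		"fmt"
		"math/bits"
	)

	// shuffle64 applies a per-byte permutation to one 8-byte lane,
	// the same operation VPSHUFB performs per 16- or 32-byte register.
	func shuffle64(x uint64, mask [8]byte) uint64 {
		var src, dst [8]byte
		binary.LittleEndian.PutUint64(src[:], x)
		for i, m := range mask {
			dst[i] = src[m]
		}
		return binary.LittleEndian.Uint64(dst[:])
	}

	func main() {
		x := uint64(0x0123456789abcdef)
		c40 := [8]byte{3, 4, 5, 6, 7, 0, 1, 2} // bytes of $0x0201000706050403
		c48 := [8]byte{2, 3, 4, 5, 6, 7, 0, 1} // bytes of $0x0100070605040302
		fmt.Println(shuffle64(x, c40) == bits.RotateLeft64(x, -24)) // true
		fmt.Println(shuffle64(x, c48) == bits.RotateLeft64(x, -16)) // true
	}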
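Both hashBlocksAVX above and hashBlocksSSE4 in the file that follows share one contract: h is the chain value, c the 128-bit byte counter, flag the finalization word XORed into the fourth IV lane, and blocks a whole number of 128-byte blocks. The sketch below approximates the caller side in this package's blake2b_amd64.go; it is not text from this patch, and the useAVX2/useAVX/useSSE4 wiring should be treated as an assumption, though the cpu feature fields and hashBlocksGeneric fallback do exist in x/crypto.

	package blake2b

	import "golang.org/x/sys/cpu"

	var (
		useAVX2 = cpu.X86.HasAVX2
		useAVX  = cpu.X86.HasAVX
		useSSE4 = cpu.X86.HasSSE41
	)

	// hashBlocks dispatches to the widest implementation the CPU supports.
	func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
		switch {
		case useAVX2:
			hashBlocksAVX2(h, c, flag, blocks)
		case useAVX:
			hashBlocksAVX(h, c, flag, blocks)
		case useSSE4:
			hashBlocksSSE4(h, c, flag, blocks)
		default:
			hashBlocksGeneric(h, c, flag, blocks)
		}
	}

	// Go equivalent of the counter bookkeeping at the loop:/noinc: labels
	// (ADDQ $0x80, R8; CMPQ R8, $0x80; JGE noinc; INCQ R9): the low word
	// gains 128 per block, with carry into the high word.
	func incrementCounter(c *[2]uint64) {
		c[0] += 128
		if c[0] < 128 { // wrapped around: propagate the carry
			c[1]++
		}
	}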
+// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT.

 //go:build amd64 && gc && !purego

 #include "textflag.h"

-DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
-
-#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
-	MOVO v4, t1; \
-	MOVO v5, v4; \
-	MOVO t1, v5; \
-	MOVO v6, t1; \
-	PUNPCKLQDQ v6, t2; \
-	PUNPCKHQDQ v7, v6; \
-	PUNPCKHQDQ t2, v6; \
-	PUNPCKLQDQ v7, t2; \
-	MOVO t1, v7; \
-	MOVO v2, t1; \
-	PUNPCKHQDQ t2, v7; \
-	PUNPCKLQDQ v3, t2; \
-	PUNPCKHQDQ t2, v2; \
-	PUNPCKLQDQ t1, t2; \
-	PUNPCKHQDQ t2, v3
-
-#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
-	MOVO v4, t1; \
-	MOVO v5, v4; \
-	MOVO t1, v5; \
-	MOVO v2, t1; \
-	PUNPCKLQDQ v2, t2; \
-	PUNPCKHQDQ v3, v2; \
-	PUNPCKHQDQ t2, v2; \
-	PUNPCKLQDQ v3, t2; \
-	MOVO t1, v3; \
-	MOVO v6, t1; \
-	PUNPCKHQDQ t2, v3; \
-	PUNPCKLQDQ v7, t2; \
-	PUNPCKHQDQ t2, v6; \
-	PUNPCKLQDQ t1, t2; \
-	PUNPCKHQDQ t2, v7
-
-#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
-	PADDQ m0, v0; \
-	PADDQ m1, v1; \
-	PADDQ v2, v0; \
-	PADDQ v3, v1; \
-	PXOR v0, v6; \
-	PXOR v1, v7; \
-	PSHUFD $0xB1, v6, v6; \
-	PSHUFD $0xB1, v7, v7; \
-	PADDQ v6, v4; \
-	PADDQ v7, v5; \
-	PXOR v4, v2; \
-	PXOR v5, v3; \
-	PSHUFB c40, v2; \
-	PSHUFB c40, v3; \
-	PADDQ m2, v0; \
-	PADDQ m3, v1; \
-	PADDQ v2, v0; \
-	PADDQ v3, v1; \
-	PXOR v0, v6; \
-	PXOR v1, v7; \
-	PSHUFB c48, v6; \
-	PSHUFB c48, v7; \
-	PADDQ v6, v4; \
-	PADDQ v7, v5; \
-	PXOR v4, v2; \
-	PXOR v5, v3; \
-	MOVOU v2, t0; \
-	PADDQ v2, t0; \
-	PSRLQ $63, v2; \
-	PXOR t0, v2; \
-	MOVOU v3, t0; \
-	PADDQ v3, t0; \
-	PSRLQ $63, v3; \
-	PXOR t0, v3
-
-#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
-	MOVQ i0*8(src), m0; \
-	PINSRQ $1, i1*8(src), m0; \
-	MOVQ i2*8(src), m1; \
-	PINSRQ $1, i3*8(src), m1; \
-	MOVQ i4*8(src), m2; \
-	PINSRQ $1, i5*8(src), m2; \
-	MOVQ i6*8(src), m3; \
-	PINSRQ $1, i7*8(src), m3
-
 // func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
-	MOVQ h+0(FP), AX
-	MOVQ c+8(FP), BX
-	MOVQ flag+16(FP), CX
-	MOVQ blocks_base+24(FP), SI
-	MOVQ blocks_len+32(FP), DI
-
-	MOVQ SP, R10
-	ADDQ $15, R10
-	ANDQ $~15, R10
-
-	MOVOU ·iv3<>(SB), X0
-	MOVO X0, 0(R10)
-	XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0)
-
-	MOVOU ·c40<>(SB), X13
-	MOVOU ·c48<>(SB), X14
-
-	MOVOU 0(AX), X12
+// Requires: SSE2, SSE4.1, SSSE3
+TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48
+	MOVQ h+0(FP), AX
+	MOVQ c+8(FP), BX
+	MOVQ flag+16(FP), CX
+	MOVQ blocks_base+24(FP), SI
+	MOVQ blocks_len+32(FP), DI
+	MOVQ SP, R10
+	ADDQ $0x0f, R10
+	ANDQ $-16, R10
+	MOVOU ·iv3<>+0(SB), X0
+	MOVO X0, (R10)
+	XORQ CX, (R10)
+	MOVOU ·c40<>+0(SB), X13
+	MOVOU ·c48<>+0(SB), X14
+	MOVOU (AX), X12
 	MOVOU 16(AX),
X15 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 + MOVQ (BX), R8 + MOVQ 8(BX), R9 loop: - ADDQ $128, R8 - CMPQ R8, $128 + ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, 
X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + MOVQ R8, X8 + PINSRQ $0x01, R9, X8 + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>+0(SB), X4 + MOVOU ·iv1<>+0(SB), X5 + MOVOU ·iv2<>+0(SB), X6 + PXOR X8, X6 + MOVO (R10), X7 + MOVQ (SI), X8 + PINSRQ $0x01, 16(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 48(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 40(SI), X11 + PINSRQ $0x01, 56(SI), X11 + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 64(SI), X8 + PINSRQ $0x01, 80(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 112(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 88(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 120(SI), X11 + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 112(SI), X8 + PINSRQ $0x01, 32(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 80(SI), X10 + PINSRQ 
$0x01, 64(SI), X10 + MOVQ 120(SI), X11 + PINSRQ $0x01, 48(SI), X11 + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 8(SI), X8 + PINSRQ $0x01, (SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, 40(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 16(SI), X10 + MOVQ 56(SI), X11 + PINSRQ $0x01, 24(SI), X11 + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 88(SI), X8 + PINSRQ $0x01, 96(SI), X8 + MOVQ 40(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 64(SI), X10 + PINSRQ $0x01, (SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + 
PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + 
PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + 
MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + 
PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ 
X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + 
JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + RET - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) +DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), RODATA|NOPTR, $16 - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) +DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), RODATA|NOPTR, $16 - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) +DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), RODATA|NOPTR, $16 - RET +DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index e0d3c647..13375738 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -1,108 +1,93 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. 
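A note on the two regenerated assembly files meeting here. In the BLAKE2b data section just above, iv0 through iv3 hold the BLAKE2b initialization vector (the SHA-512 IV words, per RFC 7693), while c40 and c48 are PSHUFB byte-shuffle masks: the per-lane rotations right by 24 and 16 bits are done as byte permutations, the 32-bit rotation as PSHUFD $0xb1, and the 63-bit rotation as the PADDQ (x<<1) plus PSRLQ $0x3f plus PXOR pattern repeated throughout the round code. Below, sum_amd64.s was likewise regenerated from a Go program (sum_amd64_asm.go, per its header): the POLY1305_ADD and POLY1305_MUL macros are gone and their expansion is emitted inline, but the arithmetic is unchanged. Its core is the partial reduction modulo 2^130-5, which exploits 2^130 ≡ 5. A minimal standalone sketch of that step (reduce is a hypothetical name, not the vendored code):

package main

import (
	"fmt"
	"math/bits"
)

// reduce folds the product limbs t0..t3 (t = t0 + 2^64*t1 + 2^128*t2 + 2^192*t3)
// partially modulo 2^130-5, using 2^130 ≡ 5: with q = t >> 130,
// t ≡ (t mod 2^130) + 4q + q. This mirrors the tail of the former
// POLY1305_MUL macro; it is a sketch, not the vendored implementation.
func reduce(t0, t1, t2, t3 uint64) (h0, h1, h2 uint64) {
	h0, h1, h2 = t0, t1, t2&3 // t mod 2^130

	// 4q occupies exactly the two limbs (t2 &^ 3, t3).
	var c uint64
	h0, c = bits.Add64(h0, t2&^3, 0)
	h1, c = bits.Add64(h1, t3, c)
	h2 += c

	// q itself is (t3:t2) >> 2.
	h0, c = bits.Add64(h0, t2>>2|t3<<62, 0)
	h1, c = bits.Add64(h1, t3>>2, c)
	h2 += c
	return
}

func main() {
	fmt.Println(reduce(0, 0, 8, 0)) // 10 0 0: 2^131 = 2*2^130 ≡ 10 (mod 2^130-5)
}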
//go:build gc && !purego -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) +// func update(state *macState, msg []byte) TEXT ·update(SB), $0-32 MOVQ state+0(FP), DI MOVQ msg_base+8(FP), SI MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 JB bytes_between_0_and_15 loop: - POLY1305_ADD(SI, R8, R9, R10) + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop bytes_between_0_and_15: TESTQ R15, R15 JZ done - MOVQ $1, BX + MOVQ $0x00000001, BX XORQ CX, CX XORQ R13, R13 ADDQ R15, SI flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX + SHLQ $0x08, BX, CX + SHLQ $0x08, BX MOVB -1(SI), R13 XORQ R13, BX DECQ SI DECQ R15 JNZ flush_buffer - ADDQ BX, R8 ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 JMP multiply done: - MOVQ R8, 0(DI) + MOVQ R8, (DI) MOVQ R9, 8(DI) MOVQ R10, 16(DI) RET diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index 1ea9275b..a01ef435 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { // leftEncode returns max 9 bytes c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) c.initBlock = append(c.initBlock, S...) 
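The sha3/shake.go hunk above is a subtle but real fix: in uint64(len(N)*8) the multiplication happens in int and only then widens, so on 32-bit platforms a sufficiently long function-name or customization string wraps before the conversion and cSHAKE encodes the wrong bit length; uint64(len(N))*8 widens first. A small sketch of the difference (n is a hypothetical length; on 64-bit platforms the two lines happen to agree):

package main

import "fmt"

func main() {
	// Assume a 32-bit platform, where int is 32 bits wide.
	n := 1 << 29
	fmt.Println(uint64(n * 8)) // multiply wraps in int first: prints 0 on 32-bit
	fmt.Println(uint64(n) * 8) // widen first: prints 4294967296 everywhere
}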
c.Write(bytepad(c.initBlock, c.rate)) return &c diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go index 21bfa870..c1b43610 100644 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -175,6 +175,15 @@ func (r *keyring) Add(key AddedKey) error { p.expire = &t } + // If we already have a Signer with the same public key, replace it with the + // new one. + for idx, k := range r.keys { + if bytes.Equal(k.signer.PublicKey().Marshal(), p.signer.PublicKey().Marshal()) { + r.keys[idx] = p + return nil + } + } + r.keys = append(r.keys, p) return nil diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 3ca9e89e..c0d1c29e 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -510,8 +510,8 @@ userAuthLoop: if err := s.transport.writePacket(Marshal(discMsg)); err != nil { return nil, err } - - return nil, discMsg + authErrs = append(authErrs, discMsg) + return nil, &ServerAuthError{Errors: authErrs} } var userAuthReq userAuthRequestMsg diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 00000000..2c033dff --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 00000000..fbf1934a --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
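Two behavioral fixes ride along in the ssh hunks above: agent keyrings now replace an existing entry with the same public key on Add instead of appending a duplicate, and server.go now wraps the accumulated per-attempt failures in a *ServerAuthError even when it disconnects the client, rather than returning the bare disconnect message. A hedged sketch of what the latter enables for callers (handle, nconn, and config are assumed placeholder names; ServerAuthError and its Errors field come from the hunk above):

package demo

import (
	"errors"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func handle(nconn net.Conn, config *ssh.ServerConfig) {
	_, _, _, err := ssh.NewServerConn(nconn, config)
	if err == nil {
		return // a real server would service the connection here
	}
	var authErr *ssh.ServerAuthError
	if errors.As(err, &authErr) {
		// After this change, the per-attempt errors survive even when the
		// client was disconnected for exceeding the allowed auth tries.
		for _, e := range authErr.Errors {
			log.Printf("auth attempt failed: %v", e)
		}
	}
}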
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 00000000..46ceac34 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,515 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +package slices + +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[S ~[]E, E comparable](s1, s2 S) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using an equality +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmpCompare(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). 
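The cmp.go helpers above backport the Go 1.21 cmp semantics, including a total order for floating-point values: isNaN (defined in sort.go further down) relies on the x != x property, and cmpLess/cmpCompare rank NaNs before every other value so sorting stays well defined. A quick demonstration of the resulting order:

package main

import (
	"fmt"
	"math"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []float64{3, math.NaN(), 1, math.Inf(-1)}
	slices.Sort(xs)
	fmt.Println(xs) // [NaN -Inf 1 3]: NaNs order before all other values
}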
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { + for i, v1 := range s1 { + if i >= len(s2) { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < len(s2) { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[S ~[]E, E comparable](s S, v E) int { + for i := range s { + if v == s[i] { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { + for i := range s { + if f(s[i]) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[S ~[]E, E comparable](s S, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) + copy(s2[i:], v) + copy(s2[i+m:], s[i:]) + return s2 + } + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s +} + +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. 
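Insert above is careful to copy each element at most twice and to stay correct even when v aliases the destination, using the write-into-the-tail-then-rotateRight trick sketched in its comments. Its observable behavior is simple:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 6, 7}
	s = slices.Insert(s, 2, 3, 4, 5)
	fmt.Println(s) // [1 2 3 4 5 6 7]: r[i] == v[0], later elements shifted up

	t := []int{1, 2, 3, 4}
	t = slices.Insert(t, 0, t[2:]...) // v aliases t: exercises the overlap handling
	fmt.Println(t)                    // [3 4 1 2 3 4]
}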
+// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s +} + +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// DeleteFunc zeroes the elements between the new length and the original length. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 + } + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. + copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r +} + +// Clone returns a copy of the slice. 
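Delete (above) and DeleteFunc zero the slots they vacate via clearSlice so that values past the new length stop pinning memory; the backing array keeps its capacity, but the obsolete pointers are gone. A small illustration (page is an assumed example type):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type page struct{ data [1 << 20]byte }

func main() {
	s := []*page{new(page), new(page), new(page)}
	s = slices.Delete(s, 0, 2)
	fmt.Println(len(s), cap(s)) // 1 3
	// The two slots beyond len(s) were set to nil, so the deleted pages are
	// garbage-collectable even though cap(s) still spans their positions.
}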
+// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. +// Compact zeroes the elements between the new length and the original length. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } + i++ + } + } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. 
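The rotation notes above justify the block-swap rotateLeft: it moves each element exactly twice, where the simpler triple-reverse algorithm is more cache friendly but writes more. For contrast, a sketch of that triple-reverse variant (rotateByReversal and reverse are hypothetical names, not part of the package):

package main

import "fmt"

// reverse mirrors slices.Reverse, for the sketch below.
func reverse[E any](s []E) {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		s[i], s[j] = s[j], s[i]
	}
}

// rotateByReversal rotates s left by r using three reversals:
// reverse each of the two blocks, then reverse the whole slice.
func rotateByReversal[E any](s []E, r int) {
	reverse(s[:r])
	reverse(s[r:])
	reverse(s)
}

func main() {
	s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	rotateByReversal(s, 2)
	fmt.Println(s) // [2 3 4 5 6 7 8 9 0 1]
}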
+func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 00000000..f58bbc7b --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,197 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b or when a is not comparable to b in the sense +// of the formal definition of Strict Weak Ordering. +// +// SortFunc requires that cmp is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +// To indicate 'uncomparable', return 0 from the function. +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + n := len(x) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { + for i := len(x) - 1; i > 0; i-- { + if cmpLess(x[i], x[i-1]) { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { + for i := len(x) - 1; i > 0; i-- { + if cmp(x[i], x[i-1]) < 0 { + return false + } + } + return true +} + +// Min returns the minimal value in x. It panics if x is empty. 
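SortFunc in sort.go above demands a strict weak ordering from cmp and gives no stability guarantee; SortStableFunc is the stable variant. A typical comparator looks like this (user is an assumed example type):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type user struct {
	name string
	age  int
}

func main() {
	us := []user{{"bo", 42}, {"al", 42}, {"cy", 17}}
	slices.SortFunc(us, func(a, b user) int {
		// negative: a sorts before b; positive: after; 0: tie (order unspecified)
		switch {
		case a.age < b.age:
			return -1
		case a.age > b.age:
			return 1
		}
		return 0
	})
	fmt.Println(us) // [{cy 17} ...]: the two age-42 users may appear in either order
}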
+// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmpLess(x[h], target) { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) +} + +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. 
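BinarySearch above maintains the invariant x[i-1] < target <= x[i] and returns the would-be insertion index together with a found flag, so a miss is still useful:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{10, 20, 30}
	i, ok := slices.BinarySearch(xs, 20)
	fmt.Println(i, ok) // 1 true
	i, ok = slices.BinarySearch(xs, 25)
	fmt.Println(i, ok) // 2 false: 25 would be inserted at index 2
}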
+ i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go new file mode 100644 index 00000000..06f2c7a2 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownCmpFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { + child++ + } + if !(cmp(data[first+root], data[first+child]) < 0) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownCmpFunc(data, i, hi, first, cmp) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownCmpFunc(data, lo, i, first, cmp) + } +} + +// pdqsortCmpFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. 
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortCmpFunc(data, a, b, cmp) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortCmpFunc(data, a, b, cmp) + return + } + + // If the last partitioning was imbalanced, we need to break patterns. + if !wasBalanced { + breakPatternsCmpFunc(data, a, b, cmp) + limit-- + } + + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) + if hint == decreasingHint { + reverseRangeCmpFunc(data, a, b, cmp) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortCmpFunc(data, a, b, cmp) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) + a = mid + continue + } + + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortCmpFunc(data, a, mid, limit, cmp) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortCmpFunc(data, mid+1, b, limit, cmp) + b = mid + } + } +} + +// partitionCmpFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot. +// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && (cmp(data[i], data[a]) < 0) { + i++ + } + for i <= j && !(cmp(data[j], data[a]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumes that data[a:b] does not contain elements smaller than data[pivot]. +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !(cmp(data[a], data[i]) < 0) { + i++ + } + for i <= j && (cmp(data[a], data[j]) < 0) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !(cmp(data[i], data[i-1]) < 0) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !(cmp(data[j], data[j-1]) < 0) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotCmpFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) + } + // Find the median among i, j, k and stores it into j. + j = medianCmpFunc(data, i, j, k, &swaps, cmp) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { + *swaps++ + return b, a + } + return a, b +} + +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) + return b +} + +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) +} + +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortCmpFunc(data, a, b, cmp) + a = b + b += blockSize + } + insertionSortCmpFunc(data, a, n, cmp) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeCmpFunc(data, a, a+blockSize, b, cmp) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeCmpFunc(data, a, m, n, cmp) + } + blockSize *= 2 + } +} + +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmp(data[h], data[a]) < 0 { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(cmp(data[m], data[h]) < 0) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(cmp(data[p-c], data[c]) < 0) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateCmpFunc(data, start, m, end, cmp) + } + if a < start && start < mid { + symMergeCmpFunc(data, a, start, mid, cmp) + } + if mid < end && end < b { + symMergeCmpFunc(data, mid, end, b, cmp) + } +} + +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeCmpFunc(data, m-i, m, j, cmp) + i -= j + } else { + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) + j -= i + } + } + // i == j + swapRangeCmpFunc(data, m-i, m, i, cmp) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 00000000..99b47c39 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
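rotateCmpFunc reduces block rotation to range swaps driven by a subtractive, Euclid-style loop. A standalone sketch on a plain []int (the comparator is unused by rotation, so it is dropped here):

package main

import "fmt"

// swapRange swaps data[a:a+n] with data[b:b+n], as swapRangeCmpFunc does.
func swapRange(data []int, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

// rotate exchanges the consecutive blocks data[a:m] and data[m:b],
// repeatedly swapping the smaller block into place until the
// remaining block lengths are equal.
func rotate(data []int, a, m, b int) {
	i, j := m-a, b-m
	for i != j {
		if i > j {
			swapRange(data, m-i, m, j)
			i -= j
		} else {
			swapRange(data, m-i, m+j-i, i)
			j -= i
		}
	}
	swapRange(data, m-i, m, i)
}

func main() {
	// u = [1 2], v = [3 4 5]; rotating yields v followed by u.
	data := []int{1, 2, 3, 4, 5}
	rotate(data, 0, 2, 5)
	fmt.Println(data) // [3 4 5 1 2]
}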
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+	root := lo
+	for {
+		child := 2*root + 1
+		if child >= hi {
+			break
+		}
+		if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+			child++
+		}
+		if !cmpLess(data[first+root], data[first+child]) {
+			return
+		}
+		data[first+root], data[first+child] = data[first+child], data[first+root]
+		root = child
+	}
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+	first := a
+	lo := 0
+	hi := b - a
+
+	// Build heap with greatest element at top.
+	for i := (hi - 1) / 2; i >= 0; i-- {
+		siftDownOrdered(data, i, hi, first)
+	}
+
+	// Pop elements, largest first, into end of data.
+	for i := hi - 1; i >= 0; i-- {
+		data[first], data[first+i] = data[first+i], data[first]
+		siftDownOrdered(data, lo, i, first)
+	}
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+	const maxInsertion = 12
+
+	var (
+		wasBalanced    = true // whether the last partitioning was reasonably balanced
+		wasPartitioned = true // whether the slice was already partitioned
+	)
+
+	for {
+		length := b - a
+
+		if length <= maxInsertion {
+			insertionSortOrdered(data, a, b)
+			return
+		}
+
+		// Fall back to heapsort if too many bad choices were made.
+		if limit == 0 {
+			heapSortOrdered(data, a, b)
+			return
+		}
+
+		// If the last partitioning was imbalanced, we need to break patterns.
+		if !wasBalanced {
+			breakPatternsOrdered(data, a, b)
+			limit--
+		}
+
+		pivot, hint := choosePivotOrdered(data, a, b)
+		if hint == decreasingHint {
+			reverseRangeOrdered(data, a, b)
+			// The chosen pivot was pivot-a elements after the start of the array.
+			// After reversing it is pivot-a elements before the end of the array.
+			// The idea came from Rust's implementation.
+			pivot = (b - 1) - (pivot - a)
+			hint = increasingHint
+		}
+
+		// The slice is likely already sorted.
+		if wasBalanced && wasPartitioned && hint == increasingHint {
+			if partialInsertionSortOrdered(data, a, b) {
+				return
+			}
+		}
+
+		// Probably the slice contains many duplicate elements, partition the slice into
+		// elements equal to and elements greater than the pivot.
+		if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+			mid := partitionEqualOrdered(data, a, b, pivot)
+			a = mid
+			continue
+		}
+
+		mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+		wasPartitioned = alreadyPartitioned
+
+		leftLen, rightLen := mid-a, b-mid
+		balanceThreshold := length / 8
+		if leftLen < rightLen {
+			wasBalanced = leftLen >= balanceThreshold
+			pdqsortOrdered(data, a, mid, limit)
+			a = mid + 1
+		} else {
+			wasBalanced = rightLen >= balanceThreshold
+			pdqsortOrdered(data, mid+1, b, limit)
+			b = mid
+		}
+	}
+}
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && cmpLess(data[i], data[a]) {
+		i++
+	}
+	for i <= j && !cmpLess(data[j], data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && cmpLess(data[i], data[a]) {
+			i++
+		}
+		for i <= j && !cmpLess(data[j], data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !cmpLess(data[a], data[i]) {
+			i++
+		}
+		for i <= j && cmpLess(data[a], data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !cmpLess(data[i], data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !cmpLess(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
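The limit parameter is a budget of tolerated bad pivots before pdqsortOrdered gives up and heapsorts. The exported entry points in this package seed it with bits.Len(uint(n)), so the budget grows only logarithmically with slice length; a quick illustration:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Roughly log2(n) imbalanced partitions are tolerated before
	// the O(n log n) heapsort fallback kicks in.
	for _, n := range []int{10, 1_000, 1_000_000} {
		fmt.Printf("n=%d allows %d bad pivots\n", n, bits.Len(uint(n)))
	}
}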
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if cmpLess(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. 
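Both single-element fast paths in symMerge reduce to one binary search: find the lowest index whose element is >= (or >) the probe. A minimal standalone sketch of the >= variant used when data[a:m] has one element:

package main

import "fmt"

// lowestGE returns the lowest index i in [lo, hi) with data[i] >= x,
// or hi if no such index exists, using the same midpoint loop as the
// m-a == 1 fast path before it shifts the element into place.
func lowestGE(data []int, lo, hi, x int) int {
	i, j := lo, hi
	for i < j {
		h := int(uint(i+j) >> 1) // overflow-safe midpoint
		if data[h] < x {
			i = h + 1
		} else {
			j = h
		}
	}
	return i
}

func main() {
	sorted := []int{2, 4, 4, 8}
	fmt.Println(lowestGE(sorted, 0, len(sorted), 4)) // 1
	fmt.Println(lowestGE(sorted, 0, len(sorted), 9)) // 4 (not found)
}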
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if cmpLess(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !cmpLess(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !cmpLess(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 00000000..de58dfb8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. 
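The precedence rules above boil down to "first non-zero value wins, then clamp out-of-range results to a default". A schematic sketch of that idea, not the vendored code itself (the real work happens in fillNetHTTPConfig and setConfigDefaults below):

package main

import "fmt"

// firstNonZero mirrors the precedence rule: the net/http value wins
// when set, otherwise the http2 value; zero falls through to a default.
func firstNonZero[T comparable](vals ...T) T {
	var zero T
	for _, v := range vals {
		if v != zero {
			return v
		}
	}
	return zero
}

func main() {
	const fromNetHTTP, fromHTTP2, def = 0, 256, 250
	v := firstNonZero(fromNetHTTP, fromHTTP2)
	if v == 0 {
		v = def // setConfigDefaults-style fallback
	}
	fmt.Println(v) // 256
}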
+type http2Config struct {
+	MaxConcurrentStreams         uint32
+	MaxDecoderHeaderTableSize    uint32
+	MaxEncoderHeaderTableSize    uint32
+	MaxReadFrameSize             uint32
+	MaxUploadBufferPerConnection int32
+	MaxUploadBufferPerStream     int32
+	SendPingTimeout              time.Duration
+	PingTimeout                  time.Duration
+	WriteByteTimeout             time.Duration
+	PermitProhibitedCipherSuites bool
+	CountError                   func(errType string)
+}
+
+// configFromServer merges configuration settings from
+// net/http.Server.HTTP2Config and http2.Server.
+func configFromServer(h1 *http.Server, h2 *Server) http2Config {
+	conf := http2Config{
+		MaxConcurrentStreams:         h2.MaxConcurrentStreams,
+		MaxEncoderHeaderTableSize:    h2.MaxEncoderHeaderTableSize,
+		MaxDecoderHeaderTableSize:    h2.MaxDecoderHeaderTableSize,
+		MaxReadFrameSize:             h2.MaxReadFrameSize,
+		MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
+		MaxUploadBufferPerStream:     h2.MaxUploadBufferPerStream,
+		SendPingTimeout:              h2.ReadIdleTimeout,
+		PingTimeout:                  h2.PingTimeout,
+		WriteByteTimeout:             h2.WriteByteTimeout,
+		PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
+		CountError:                   h2.CountError,
+	}
+	fillNetHTTPServerConfig(&conf, h1)
+	setConfigDefaults(&conf, true)
+	return conf
+}
+
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
+// (the net/http Transport).
+func configFromTransport(h2 *Transport) http2Config {
+	conf := http2Config{
+		MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+		MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+		MaxReadFrameSize:          h2.MaxReadFrameSize,
+		SendPingTimeout:           h2.ReadIdleTimeout,
+		PingTimeout:               h2.PingTimeout,
+		WriteByteTimeout:          h2.WriteByteTimeout,
+	}
+
+	// Unlike most config fields, where out-of-range values revert to the default,
+	// Transport.MaxReadFrameSize clips.
+	if conf.MaxReadFrameSize < minMaxFrameSize {
+		conf.MaxReadFrameSize = minMaxFrameSize
+	} else if conf.MaxReadFrameSize > maxFrameSize {
+		conf.MaxReadFrameSize = maxFrameSize
+	}
+
+	if h2.t1 != nil {
+		fillNetHTTPTransportConfig(&conf, h2.t1)
+	}
+	setConfigDefaults(&conf, false)
+	return conf
+}
+
+func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
+	if *v < minval || *v > maxval {
+		*v = defval
+	}
+}
+
+func setConfigDefaults(conf *http2Config, server bool) {
+	setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
+	setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+	setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
+	if server {
+		setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
+	} else {
+		setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
+	}
+	if server {
+		setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
+	} else {
+		setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
+	}
+	setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
+	setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
+}
+
+// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
+// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
+func adjustHTTP1MaxHeaderSize(n int64) int64 {
+	// http2's count is in a slightly different unit and includes 32 bytes per pair.
+	// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+	const perFieldOverhead = 32 // per http2 spec
+	const typicalHeaders = 10   // conservative
+	return n + typicalHeaders*perFieldOverhead
+}
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
new file mode 100644
index 00000000..e3784123
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -0,0 +1,61 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.24
+
+package http2
+
+import "net/http"
+
+// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
+	fillNetHTTPConfig(conf, srv.HTTP2)
+}
+
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
+	fillNetHTTPConfig(conf, tr.HTTP2)
+}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+	if h2 == nil {
+		return
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if h2.MaxEncoderHeaderTableSize != 0 {
+		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+	}
+	if h2.MaxDecoderHeaderTableSize != 0 {
+		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if h2.MaxReadFrameSize != 0 {
+		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+	}
+	if h2.MaxReceiveBufferPerConnection != 0 {
+		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+	}
+	if h2.MaxReceiveBufferPerStream != 0 {
+		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+	}
+	if h2.SendPingTimeout != 0 {
+		conf.SendPingTimeout = h2.SendPingTimeout
+	}
+	if h2.PingTimeout != 0 {
+		conf.PingTimeout = h2.PingTimeout
+	}
+	if h2.WriteByteTimeout != 0 {
+		conf.WriteByteTimeout = h2.WriteByteTimeout
+	}
+	if h2.PermitProhibitedCipherSuites {
+		conf.PermitProhibitedCipherSuites = true
+	}
+	if h2.CountError != nil {
+		conf.CountError = h2.CountError
+	}
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
new file mode 100644
index 00000000..060fd6c6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_pre_go124.go
@@ -0,0 +1,16 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.24
+
+package http2
+
+import "net/http"
+
+// Pre-Go 1.24 fallback.
+// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
+
+func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
+
+func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 003e649f..7688c356 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -19,8 +19,9 @@ import (
 	"bufio"
 	"context"
 	"crypto/tls"
+	"errors"
 	"fmt"
-	"io"
+	"net"
 	"net/http"
 	"os"
 	"sort"
@@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() {
 // Its buffered writer is lazily allocated as needed, to minimize
 // idle memory usage with many connections.
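A worked example for adjustHTTP1MaxHeaderSize, using net/http's DefaultMaxHeaderBytes (1 MiB) as input: the HTTP/2 advertisement is the HTTP/1 limit padded by ten assumed header fields at 32 bytes of per-field overhead:

package main

import "fmt"

func adjustHTTP1MaxHeaderSize(n int64) int64 {
	const perFieldOverhead = 32 // per http2 spec
	const typicalHeaders = 10   // conservative
	return n + typicalHeaders*perFieldOverhead
}

func main() {
	// 1<<20 (net/http.DefaultMaxHeaderBytes) + 10*32 = 1048896.
	fmt.Println(adjustHTTP1MaxHeaderSize(1 << 20))
}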
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3e..617b4a47 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. 
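writeWithByteTimeout above re-arms the write deadline after every partial write, so it bounds stalls rather than total write time. A self-contained sketch of the same loop, dropping the synctest group hook, demonstrated over a net.Pipe whose far end never reads:

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"time"
)

// writeWithTimeout fails only if `timeout` passes with no bytes written;
// each partial write pushes the deadline forward.
func writeWithTimeout(conn net.Conn, timeout time.Duration, p []byte) (int, error) {
	n := 0
	for {
		conn.SetWriteDeadline(time.Now().Add(timeout))
		nn, err := conn.Write(p[n:])
		n += nn
		if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
			// Finished, stalled completely, or hit a real error.
			conn.SetWriteDeadline(time.Time{})
			return n, err
		}
	}
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	// Nobody reads from server, so the write stalls and times out.
	_, err := writeWithTimeout(client, 50*time.Millisecond, []byte("hello"))
	fmt.Println(err) // e.g. "write pipe: i/o timeout"
}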
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
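Concretely, for the token top-up mentioned just above: the connection window starts at the spec default of 65535, and only the surplus beyond that is advertised via WINDOW_UPDATE. A small worked example using the server-side default of 1<<20:

package main

import "fmt"

const initialWindowSize = 65535 // HTTP/2 spec default connection window

func main() {
	maxUploadBufferPerConnection := int32(1 << 20) // server-side default
	if diff := maxUploadBufferPerConnection - initialWindowSize; diff > 0 {
		fmt.Println("WINDOW_UPDATE increment:", diff) // 983041
	}
}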
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
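handlePingTimer above only sends a PING when the connection has truly been quiet for readIdleTimeout; if frames arrived since the timer was armed, it re-arms for the remainder instead. A small sketch of that arming arithmetic (nextPingDelay is an illustrative name, not part of the patch):

package main

import (
	"fmt"
	"time"
)

// nextPingDelay returns how long to re-arm the read-idle timer for,
// or 0 when the idle window has fully elapsed and a PING is due.
func nextPingDelay(lastFrameTime, now time.Time, readIdleTimeout time.Duration) time.Duration {
	pingAt := lastFrameTime.Add(readIdleTimeout)
	if pingAt.After(now) {
		return pingAt.Sub(now)
	}
	return 0
}

func main() {
	now := time.Now()
	// A frame arrived 10s ago with a 30s read-idle timeout: wait ~20s more.
	fmt.Println(nextPingDelay(now.Add(-10*time.Second), now, 30*time.Second))
	// No frames for 40s: ping immediately.
	fmt.Println(nextPingDelay(now.Add(-40*time.Second), now, 30*time.Second))
}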
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 61f511f9..0c5f64aa 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
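The reworked Transport.maxHeaderListSize above folds in t1.MaxResponseHeaderBytes (padded via adjustHTTP1MaxHeaderSize) and then clamps the result. A sketch of just the clamping, with clampHeaderListSize as a hypothetical name:

package main

import "fmt"

// clampHeaderListSize mirrors the tail of maxHeaderListSize: values <= 0
// fall back to 10 MiB, and values at or above 2^32-1 map to 0, the
// framer's "unlimited" sentinel.
func clampHeaderListSize(n int64) uint32 {
	if n <= 0 {
		return 10 << 20
	}
	if n >= 0xffffffff {
		return 0
	}
	return uint32(n)
}

func main() {
	fmt.Println(clampHeaderListSize(0))        // 10485760 (default 10 MiB)
	fmt.Println(clampHeaderListSize(1 << 40))  // 0 (unlimited)
	fmt.Println(clampHeaderListSize(64 << 10)) // 65536
}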
// @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), - } + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), + } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize if cs, ok := c.(connectionStater); ok { @@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2199,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398..6ff6bee7 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000..dc5225b6 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. 
+func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. 
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
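+// Buckets are ordered newest first, and num must be less than the series'
+// bucket count. A hypothetical caller (illustrative only):
+//
+//	recent := ts.LatestBuckets(0, 3) // three newest level-0 buckets (1s each under the default resolutions)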
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
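+	// (Illustrative: for a level of 1-minute buckets with numBuckets 64,
+	// asking for the last 10 minutes advances past roughly 54 whole source
+	// buckets before any overlap arithmetic is needed.)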
+ if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
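+//
+// A test might inject a deterministic clock; sketch only (fixedClock is
+// hypothetical, not part of this package):
+//
+//	type fixedClock struct{ t time.Time }
+//
+//	func (c *fixedClock) Time() time.Time { return c.t }
+//
+//	s := NewMinuteHourSeriesWithClock(NewFloat, &fixedClock{t: someStart})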
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000..c646a695 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
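+// They implement sort.Interface; sort.Sort(els) therefore places the most
+// recently started log first, which is how RenderEvents above orders them.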
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
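+		// Worked example: with maxEventsPerLog == 100, the 101st entry
+		// turns events[0] into "(2 events discarded)"; every later
+		// overflow bumps that count by one and appends the new entry.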
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000..d6c71101 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// addMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
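+// The most populated bucket is drawn at this full width and every other
+// bar is scaled proportionally via barsizeMult in newData below.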
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+<table>
+<tr>
+	<td style="padding:0.25em">Count: {{.Count}}</td>
+	<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+	<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+	<td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+<tr>
+	<td style="padding:0 0 0 0.25em">[</td>
+	<td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+	<td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+	<td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+	<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+	<td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+	<td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+</tr>
+{{end}}
+{{end}}
+</table>
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000..eae2a99f --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
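+	// For example, "127.0.0.1:54321" yields host "127.0.0.1", while a
+	// bare "::1" makes SplitHostPort fail and is used verbatim below.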
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
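+	//
+	// A hypothetical caller pooling its LazyLog values (sketch only):
+	//
+	//	tr.SetRecycler(func(x interface{}) { pool.Put(x) })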
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Since(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
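+		// (When full, i has wrapped around to b.start, so the slot being
+		// unref'd and overwritten is always the oldest trace in the ring.)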
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
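+	// In effect SetMaxEvents is a no-op unless m > 3 and no events have
+	// been logged yet; e.g. tr.SetMaxEvents(2) is silently ignored.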
+ if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. + go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
+<tr><th>When</th><th>Elapsed (s)</th></tr>
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index 923a5780..ac76165c 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -8,7 +8,7 @@ // This package currently lacks some features found in an alternative // and more actively maintained WebSocket package: // -// https://pkg.go.dev/nhooyr.io/websocket +// https://pkg.go.dev/github.com/coder/websocket package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/oauth2/LICENSE +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321..109997d7 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. raw interface{} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 00000000..40518309 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,214 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import ( + "bytes" + "errors" + "fmt" + "runtime" + "runtime/debug" + "sync" +) + +// errGoexit indicates the runtime.Goexit was called in +// the user given function. +var errGoexit = errors.New("runtime.Goexit was called") + +// A panicError is an arbitrary value recovered from a panic +// with the stack trace during the execution of given function. +type panicError struct { + value interface{} + stack []byte +} + +// Error implements error interface. 
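As an aside on the oauth2 Token change above: the library deliberately leaves deriving the absolute Expiry from the relative wire-format field to the application. A minimal sketch of that conversion, assuming a *oauth2.Token named tok freshly decoded from a token response (imports "time" and "golang.org/x/oauth2"):

	// expiryFromExpiresIn derives an absolute deadline from the relative
	// expires_in value. The time base is only approximately "now", so the
	// result should be treated as a hint rather than an exact deadline.
	func expiryFromExpiresIn(tok *oauth2.Token) {
		if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
			tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
		}
	}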
+func (p *panicError) Error() string { + return fmt.Sprintf("%v\n\n%s", p.value, p.stack) +} + +func (p *panicError) Unwrap() error { + err, ok := p.value.(error) + if !ok { + return nil + } + + return err +} + +func newPanicError(v interface{}) error { + stack := debug.Stack() + + // The first line of the stack trace is of the form "goroutine N [status]:" + // but by the time the panic reaches Do the goroutine may no longer exist + // and its status will have changed. Trim out the misleading line. + if line := bytes.IndexByte(stack[:], '\n'); line >= 0 { + stack = stack[line+1:] + } + return &panicError{value: v, stack: stack} +} + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + + if e, ok := c.err.(*panicError); ok { + panic(e) + } else if c.err == errGoexit { + runtime.Goexit() + } + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +// +// The returned channel will not be closed. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
+	normalReturn := false
+	recovered := false
+
+	// Use a double defer to distinguish a panic from runtime.Goexit;
+	// for more details see https://golang.org/cl/134395.
+	defer func() {
+		// The given function invoked runtime.Goexit.
+		if !normalReturn && !recovered {
+			c.err = errGoexit
+		}
+
+		g.mu.Lock()
+		defer g.mu.Unlock()
+		c.wg.Done()
+		if g.m[key] == c {
+			delete(g.m, key)
+		}
+
+		if e, ok := c.err.(*panicError); ok {
+			// To prevent the waiting channels from being blocked forever,
+			// we need to ensure that this panic cannot be recovered.
+			if len(c.chans) > 0 {
+				go panic(e)
+				select {} // Keep this goroutine around so that it will appear in the crash dump.
+			} else {
+				panic(e)
+			}
+		} else if c.err == errGoexit {
+			// Already in the process of goexit, no need to call again
+		} else {
+			// Normal return
+			for _, ch := range c.chans {
+				ch <- Result{c.val, c.err, c.dups > 0}
+			}
+		}
+	}()
+
+	func() {
+		defer func() {
+			if !normalReturn {
+				// Ideally, we would wait to take a stack trace until we've determined
+				// whether this is a panic or a runtime.Goexit.
+				//
+				// Unfortunately, the only way we can distinguish the two is to see
+				// whether the recover stopped the goroutine from terminating, and by
+				// the time we know that, the part of the stack trace relevant to the
+				// panic has been discarded.
+				if r := recover(); r != nil {
+					c.err = newPanicError(r)
+				}
+			}
+		}()
+
+		c.val, c.err = fn()
+		normalReturn = true
+	}()
+
+	if !normalReturn {
+		recovered = true
+	}
+}
+
+// Forget tells the singleflight to forget about a key. Future calls
+// to Do for this key will call the function rather than waiting for
+// an earlier call to complete.
+func (g *Group) Forget(key string) {
+	g.mu.Lock()
+	delete(g.m, key)
+	g.mu.Unlock()
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index ec07aab0..02609d5b 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -201,6 +201,25 @@ var S390X struct {
 	_ CacheLinePad
 }
 
+// RISCV64 contains the supported CPU features and performance characteristics for riscv64
+// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate
+// the presence of RISC-V extensions.
+//
+// It is safe to assume that all the RV64G extensions are supported and so they are omitted from
+// this structure. As riscv64 Go programs require at least RV64G, the code that populates
+// this structure cannot run successfully if some of the RV64G extensions are missing.
+// The struct is padded to avoid false sharing.
+var RISCV64 struct {
+	_                 CacheLinePad
+	HasFastMisaligned bool // Fast misaligned accesses
+	HasC              bool // Compressed instruction-set extension
+	HasV              bool // Vector extension compatible with RVV 1.0
+	HasZba            bool // Address generation instructions extension
+	HasZbb            bool // Basic bit-manipulation extension
+	HasZbs            bool // Single-bit instructions extension
+	_                 CacheLinePad
+}
+
 func init() {
 	archInit()
 	initOptions()
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
index cd63e733..7d902b68 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
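The Do/doCall machinery above is easiest to see from the caller's side. A minimal, runnable sketch of duplicate suppression with the vendored package; the key "config" and the loader closure are invented for illustration:

	package main

	import (
		"fmt"
		"sync"

		"golang.org/x/sync/singleflight"
	)

	func main() {
		var g singleflight.Group
		var wg sync.WaitGroup
		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// Concurrent callers using the same key share one execution of
				// the closure; shared reports whether the result was reused.
				v, err, shared := g.Do("config", func() (interface{}, error) {
					return "loaded-once", nil // stands in for an expensive load
				})
				fmt.Println(v, err, shared)
			}()
		}
		wg.Wait()
	}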
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 00000000..cb4a0c57 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. 
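Given the detection strategy laid out in the comment above, downstream code is expected to consult the populated cpu.RISCV64 flags rather than probe the kernel itself. A small illustrative sketch of gating code paths on those flags (the path choices are invented):

	package main

	import (
		"fmt"

		"golang.org/x/sys/cpu"
	)

	func main() {
		// doinit runs during package initialization; here we only read flags.
		switch {
		case cpu.RISCV64.HasV && cpu.RISCV64.HasZbb:
			fmt.Println("using the RVV 1.0 + Zbb fast path")
		case cpu.RISCV64.HasFastMisaligned:
			fmt.Println("using the misaligned-access-tolerant path")
		default:
			fmt.Println("using the portable scalar path")
		}
	}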
+ +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. 
+func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c0..aca3199c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e..6e08a76a 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09e..ac54ecab 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -655,7 +656,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -665,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef..6f15ba1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
 	for err == ERESTART {
 		r, err = wait4(Pid_t(pid), &status, options, rusage)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 2d15200a..099867de 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) {
 	return pthread_fchdir_np(fd)
 }
 
+// Connectx calls connectx(2) to initiate a connection on a socket.
+//
+// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument.
+//
+// - srcIf is the optional source interface index. 0 means unspecified.
+// - srcAddr is the optional source address. nil means unspecified.
+// - dstAddr is the destination address.
+//
+// On success, Connectx returns the number of bytes enqueued for transmission.
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) {
+	endpoints := SaEndpoints{
+		Srcif: srcIf,
+	}
+
+	if srcAddr != nil {
+		addrp, addrlen, err := srcAddr.sockaddr()
+		if err != nil {
+			return 0, err
+		}
+		endpoints.Srcaddr = (*RawSockaddr)(addrp)
+		endpoints.Srcaddrlen = uint32(addrlen)
+	}
+
+	if dstAddr != nil {
+		addrp, addrlen, err := dstAddr.sockaddr()
+		if err != nil {
+			return 0, err
+		}
+		endpoints.Dstaddr = (*RawSockaddr)(addrp)
+		endpoints.Dstaddrlen = uint32(addrlen)
+	}
+
+	err = connectx(fd, &endpoints, associd, flags, iov, &n, connid)
+	return
+}
+
+//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
 //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
 //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go
index ba46651f..a6a2d2fc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_hurd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go
@@ -11,6 +11,7 @@ package unix
 int ioctl(int, unsigned long int, uintptr_t);
 */
 import "C"
+import "unsafe"
 
 func ioctl(fd int, req uint, arg uintptr) (err error) {
 	r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg))
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 3f1d3d4c..f08abd43 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) {
 	return &value, err
 }
 
+// GetsockoptTCPCCVegasInfo returns algorithm-specific congestion control information for a socket using the "vegas"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPVegasInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
+// GetsockoptTCPCCDCTCPInfo returns algorithm-specific congestion control information for a socket using the "dctcp"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
+// GetsockoptTCPCCBBRInfo returns algorithm-specific congestion control information for a socket using the "bbr"
+// algorithm.
+//
+// The socket's congestion control algorithm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option:
+//
+// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
+func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) {
+	var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment
+	vallen := _Socklen(SizeofTCPCCInfo)
+	err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+	out := (*TCPBBRInfo)(unsafe.Pointer(&value[0]))
+	return out, err
+}
+
 // GetsockoptString returns the string value of the socket option opt for the
 // socket associated with fd at the given socket level.
 func GetsockoptString(fd, level, opt int) (string, error) {
@@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) {
 //sysnb Getpid() (pid int)
 //sysnb Getppid() (ppid int)
 //sys Getpriority(which int, who int) (prio int, err error)
-//sys Getrandom(buf []byte, flags int) (n int, err error)
+
+func Getrandom(buf []byte, flags int) (n int, err error) {
+	vdsoRet, supported := vgetrandom(buf, uint32(flags))
+	if supported {
+		if vdsoRet < 0 {
+			return 0, errnoErr(syscall.Errno(-vdsoRet))
+		}
+		return vdsoRet, nil
+	}
+	var p *byte
+	if len(buf) > 0 {
+		p = &buf[0]
+	}
+	r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags))
+	if e != 0 {
+		return 0, errnoErr(e)
+	}
+	return int(r), nil
+}
+
 //sysnb Getrusage(who int, rusage *Rusage) (err error)
 //sysnb Getsid(pid int) (sid int, err error)
 //sysnb Gettid() (tid int)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index cf2ee6c7..745e5c7e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
 	}
 	return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 3d0e9845..dd2262a4 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error
 	}
 	return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags)
 }
+
+const SYS_FSTATAT = SYS_NEWFSTATAT
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6f5a2889..8cf3670b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error
 	}
 	return
riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 00000000..07ac8e09 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go similarity index 56% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go121.go rename to vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go index b7ef216d..297e97bc 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -2,13 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 -// +build go1.21 +//go:build !linux || !go1.24 -package versions +package unix -func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac17..d73c4652 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a..4a55a400 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 01a70b24..de3b4624 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -495,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 
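For the congestion-control getters added to syscall_linux.go above, the intended pattern is to read the algorithm name first and then call the matching typed accessor with the TCP_CC_INFO option. A sketch, assuming fd is a connected Linux TCP socket (imports "fmt" and "golang.org/x/sys/unix"):

	// printBBRMinRTT reports BBR's minimum RTT estimate, if BBR is in use.
	func printBBRMinRTT(fd int) {
		algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION)
		if err != nil || algo != "bbr" {
			return // the query failed, or the socket is not using BBR
		}
		info, err := unix.GetsockoptTCPCCBBRInfo(fd, unix.IPPROTO_TCP, unix.TCP_CC_INFO)
		if err == nil {
			fmt.Printf("bbr min_rtt=%dµs\n", info.Min_rtt)
		}
	}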
@@ -1922,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2187,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2356,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2431,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2933,11 +2938,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3210,6 +3216,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3226,6 +3233,7 @@ const ( STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3624,6 +3632,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 684a5168..8aa6d77c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 61d74b59..da428f42 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a28c9e3e..bf45bfec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 
NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ab5d1fe8..71c67162 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index c523090e..9476628f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 01e6ea78..b9e85f3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7aa610b1..a48b68a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 92af771b..ea00e852 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b27ef5e6..91c64687 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 237a2cef..8cbf38d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4a5c555a..a2df7341 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index a02fb49a..24791379 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e26a7c61..d265f146 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c48f7c21..3f2d6443 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -150,9 +150,14 @@ const ( NFDBITS = 
0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ad4b9aac..5d8b727a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -155,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..1ec2b140 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533e..24b346e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646b..ebd21310 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT 
libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f6..824b9c2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75..4f178a22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1bc1a5ad..af30da55 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index d3e38f68..f485dbf4 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6c778c23..1893e2fe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 37281cf5..16a4017d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9889f6a5..a5459e76 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..d003c3d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef7..0d45a941 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..51e13eb0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..d002d8ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..3f863d89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..61c72931 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..b5d17414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7f1961b9..3a69e454 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,31 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - Subvol uint64 - _ [11]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -516,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -557,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -2486,7 +2514,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3766,7 +3794,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3806,7 +3834,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3951,7 +3979,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -4609,7 +4637,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5213,7 +5241,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..ad05b51a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fb..4e613cf6 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load 
operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 00000000..fd863244 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. 
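+//
+// Illustrative use (the host name is hypothetical, not from the upstream docs):
+//
+//	k, err := registry.OpenRemoteKey("server01", registry.LOCAL_MACHINE)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer k.Close()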
+func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. 
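+//
+// A minimal sketch of reading key statistics (the printed fields are illustrative):
+//
+//	ki, err := k.Stat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%d subkeys, last written %v\n", ki.SubKeyCount, ki.ModTime())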
+func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 00000000..bbf86ccf --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 00000000..f533091c --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 00000000..74db26b9 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. 
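+	// These mirror the Windows REG_* value type constants; for example SZ is
+	// REG_SZ (a string) and DWORD is REG_DWORD (a 32-bit number).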
+ NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. +func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. 
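+//
+// Illustrative use (the value name is hypothetical):
+//
+//	if registry.LoadRegLoadMUIString() == nil {
+//		s, err := k.GetMUIStringValue("DisplayName")
+//		// handle err, use s
+//	}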
+func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. 
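+//
+// Illustrative use (the value name is hypothetical):
+//
+//	n, valtype, err := k.GetIntegerValue("InstallDate")
+//	if err == nil && valtype == registry.DWORD {
+//		fmt.Println(n)
+//	}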
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. 
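+//
+// Illustrative use (the value name and payload are hypothetical):
+//
+//	err := k.SetBinaryValue("Thumbprint", []byte{0xde, 0xad, 0xbe, 0xef})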
+func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 00000000..fc1835d8 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
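+ // Any other errno is boxed into a fresh error interface value on return,
+ // which is the allocation the preallocated cases above are meant to avoid.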
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := 
syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd1..5cee9a31 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 3f03b3d5..7b97a154 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3..4c2e1bdc 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff 
--git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f5606..df6bf948 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/golang.org/x/text/feature/plural/common.go b/vendor/golang.org/x/text/feature/plural/common.go new file mode 100644 index 00000000..fdcb373f --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/common.go @@ -0,0 +1,70 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package plural + +// Form defines a plural form. +// +// Not all languages support all forms. Also, the meaning of each form varies +// per language. It is important to note that the name of a form does not +// necessarily correspond one-to-one with the set of numbers. For instance, +// for Croation, One matches not only 1, but also 11, 21, etc. +// +// Each language must at least support the form "other". +type Form byte + +const ( + Other Form = iota + Zero + One + Two + Few + Many +) + +var countMap = map[string]Form{ + "other": Other, + "zero": Zero, + "one": One, + "two": Two, + "few": Few, + "many": Many, +} + +type pluralCheck struct { + // category: + // 3..7: opID + // 0..2: category + cat byte + setID byte +} + +// opID identifies the type of operand in the plural rule, being i, n or f. +// (v, w, and t are treated as filters in our implementation.) +type opID byte + +const ( + opMod opID = 0x1 // is '%' used? + opNotEqual opID = 0x2 // using "!=" to compare + opI opID = 0 << 2 // integers after taking the absolute value + opN opID = 1 << 2 // full number (must be integer) + opF opID = 2 << 2 // fraction + opV opID = 3 << 2 // number of visible digits + opW opID = 4 << 2 // number of visible digits without trailing zeros + opBretonM opID = 5 << 2 // hard-wired rule for Breton + opItalian800 opID = 6 << 2 // hard-wired rule for Italian + opAzerbaijan00s opID = 7 << 2 // hard-wired rule for Azerbaijan +) +const ( + // Use this plural form to indicate the next rule needs to match as well. + // The last condition in the list will have the correct plural form. + andNext = 0x7 + formMask = 0x7 + + opShift = 3 + + // numN indicates the maximum integer, or maximum mod value, for which we + // have inclusion masks. + numN = 100 + // The common denominator of the modulo that is taken. + maxMod = 100 +) diff --git a/vendor/golang.org/x/text/feature/plural/message.go b/vendor/golang.org/x/text/feature/plural/message.go new file mode 100644 index 00000000..56d518cc --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/message.go @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +import ( + "fmt" + "io" + "reflect" + "strconv" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// TODO: consider deleting this interface. Maybe VisibleDigits is always +// sufficient and practical. + +// Interface is used for types that can determine their own plural form. 
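+//
+// A minimal sketch of an implementing type (illustrative, not part of the
+// upstream file); a plain integer count has no visible fraction digits, so the
+// v, w, f and t operands are all zero:
+//
+//	type count int
+//
+//	func (c count) PluralForm(t language.Tag, scale int) (Form, int) {
+//		return Cardinal.MatchPlural(t, int(c), 0, 0, 0, 0), int(c)
+//	}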
+type Interface interface { + // PluralForm reports the plural form for the given language of the + // underlying value. It also returns the integer value. If the integer value + // is larger than fits in n, PluralForm may return a value modulo + // 10,000,000. + PluralForm(t language.Tag, scale int) (f Form, n int) +} + +// Selectf returns the first case for which its selector is a match for the +// arg-th substitution argument to a formatting call, formatting it as indicated +// by format. +// +// The cases argument are pairs of selectors and messages. Selectors are of type +// string or Form. Messages are of type string or catalog.Message. A selector +// matches an argument if: +// - it is "other" or Other +// - it matches the plural form of the argument: "zero", "one", "two", "few", +// or "many", or the equivalent Form +// - it is of the form "=x" where x is an integer that matches the value of +// the argument. +// - it is of the form " kindDefault { + e.EncodeUint(uint64(m.scale)) + } + + forms := validForms(cardinal, e.Language()) + + for i := 0; i < len(m.cases); { + if err := compileSelector(e, forms, m.cases[i]); err != nil { + return err + } + if i++; i >= len(m.cases) { + return fmt.Errorf("plural: no message defined for selector %v", m.cases[i-1]) + } + var msg catalog.Message + switch x := m.cases[i].(type) { + case string: + msg = catalog.String(x) + case catalog.Message: + msg = x + default: + return fmt.Errorf("plural: message of type %T; must be string or catalog.Message", x) + } + if err := e.EncodeMessage(msg); err != nil { + return err + } + i++ + } + return nil +} + +func compileSelector(e *catmsg.Encoder, valid []Form, selector interface{}) error { + form := Other + switch x := selector.(type) { + case string: + if x == "" { + return fmt.Errorf("plural: empty selector") + } + if c := x[0]; c == '=' || c == '<' { + val, err := strconv.ParseUint(x[1:], 10, 16) + if err != nil { + return fmt.Errorf("plural: invalid number in selector %q: %v", selector, err) + } + e.EncodeUint(uint64(c)) + e.EncodeUint(val) + return nil + } + var ok bool + form, ok = countMap[x] + if !ok { + return fmt.Errorf("plural: invalid plural form %q", selector) + } + case Form: + form = x + default: + return fmt.Errorf("plural: selector of type %T; want string or Form", selector) + } + + ok := false + for _, f := range valid { + if f == form { + ok = true + break + } + } + if !ok { + return fmt.Errorf("plural: form %q not supported for language %q", selector, e.Language()) + } + e.EncodeUint(uint64(form)) + return nil +} + +func execute(d *catmsg.Decoder) bool { + lang := d.Language() + argN := int(d.DecodeUint()) + kind := int(d.DecodeUint()) + scale := -1 // default + if kind > kindDefault { + scale = int(d.DecodeUint()) + } + form := Other + n := -1 + if arg := d.Arg(argN); arg == nil { + // Default to Other. + } else if x, ok := arg.(number.VisibleDigits); ok { + d := x.Digits(nil, lang, scale) + form, n = cardinal.matchDisplayDigits(lang, &d) + } else if x, ok := arg.(Interface); ok { + // This covers lists and formatters from the number package. 
+ form, n = x.PluralForm(lang, scale) + } else { + var f number.Formatter + switch kind { + case kindScale: + f.InitDecimal(lang) + f.SetScale(scale) + case kindScientific: + f.InitScientific(lang) + f.SetScale(scale) + case kindPrecision: + f.InitDecimal(lang) + f.SetPrecision(scale) + case kindDefault: + // sensible default + f.InitDecimal(lang) + if k := reflect.TypeOf(arg).Kind(); reflect.Int <= k && k <= reflect.Uintptr { + f.SetScale(0) + } else { + f.SetScale(2) + } + } + var dec number.Decimal // TODO: buffer in Printer + dec.Convert(f.RoundingContext, arg) + v := number.FormatDigits(&dec, f.RoundingContext) + if !v.NaN && !v.Inf { + form, n = cardinal.matchDisplayDigits(d.Language(), &v) + } + } + for !d.Done() { + f := d.DecodeUint() + if (f == '=' && n == int(d.DecodeUint())) || + (f == '<' && 0 <= n && n < int(d.DecodeUint())) || + form == Form(f) || + Other == Form(f) { + return d.ExecuteMessage() + } + d.SkipMessage() + } + return false +} diff --git a/vendor/golang.org/x/text/feature/plural/plural.go b/vendor/golang.org/x/text/feature/plural/plural.go new file mode 100644 index 00000000..e9f2d42e --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/plural.go @@ -0,0 +1,262 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_common.go + +// Package plural provides utilities for handling linguistic plurals in text. +// +// The definitions in this package are based on the plural rule handling defined +// in CLDR. See +// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules for +// details. +package plural + +import ( + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" +) + +// Rules defines the plural rules for all languages for a certain plural type. +// +// This package is UNDER CONSTRUCTION and its API may change. +type Rules struct { + rules []pluralCheck + index []byte + langToIndex []byte + inclusionMasks []uint64 +} + +var ( + // Cardinal defines the plural rules for numbers indicating quantities. + Cardinal *Rules = cardinal + + // Ordinal defines the plural rules for numbers indicating position + // (first, second, etc.). + Ordinal *Rules = ordinal + + ordinal = &Rules{ + ordinalRules, + ordinalIndex, + ordinalLangToIndex, + ordinalInclusionMasks[:], + } + + cardinal = &Rules{ + cardinalRules, + cardinalIndex, + cardinalLangToIndex, + cardinalInclusionMasks[:], + } +) + +// getIntApprox converts the digits in slice digits[start:end] to an integer +// according to the following rules: +// - Let i be asInt(digits[start:end]), where out-of-range digits are assumed +// to be zero. +// - Result n is big if i / 10^nMod > 1. +// - Otherwise the result is i % 10^nMod. +// +// For example, if digits is {1, 2, 3} and start:end is 0:5, then the result +// for various values of nMod is: +// - when nMod == 2, n == big +// - when nMod == 3, n == big +// - when nMod == 4, n == big +// - when nMod == 5, n == 12300 +// - when nMod == 6, n == 12300 +// - when nMod == 7, n == 12300 +func getIntApprox(digits []byte, start, end, nMod, big int) (n int) { + // Leading 0 digits just result in 0. + p := start + if p < 0 { + p = 0 + } + // Range only over the part for which we have digits. + mid := end + if mid >= len(digits) { + mid = len(digits) + } + // Check digits more significant that nMod. 
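+ // (Worked example: digits {1, 2, 3}, start:end 0:5, nMod 2 -> q = 3, so the
+ // nonzero digit at p = 0 is more significant than 10^2 and we return big.)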
+ if q := end - nMod; q > 0 { + if q > mid { + q = mid + } + for ; p < q; p++ { + if digits[p] != 0 { + return big + } + } + } + for ; p < mid; p++ { + n = 10*n + int(digits[p]) + } + // Multiply for trailing zeros. + for ; p < end; p++ { + n *= 10 + } + return n +} + +// MatchDigits computes the plural form for the given language and the given +// decimal floating point digits. The digits are stored in big-endian order and +// are of value byte(0) - byte(9). The floating point position is indicated by +// exp and the number of visible decimals is scale. All leading and trailing +// zeros may be omitted from digits. +// +// The following table contains examples of possible arguments to represent +// the given numbers. +// +// decimal digits exp scale +// 123 []byte{1, 2, 3} 3 0 +// 123.4 []byte{1, 2, 3, 4} 3 1 +// 123.40 []byte{1, 2, 3, 4} 3 2 +// 100000 []byte{1} 6 0 +// 100000.00 []byte{1} 6 3 +func (p *Rules) MatchDigits(t language.Tag, digits []byte, exp, scale int) Form { + index := tagToID(t) + + // Differentiate up to including mod 1000000 for the integer part. + n := getIntApprox(digits, 0, exp, 6, 1000000) + + // Differentiate up to including mod 100 for the fractional part. + f := getIntApprox(digits, exp, exp+scale, 2, 100) + + return matchPlural(p, index, n, f, scale) +} + +func (p *Rules) matchDisplayDigits(t language.Tag, d *number.Digits) (Form, int) { + n := getIntApprox(d.Digits, 0, int(d.Exp), 6, 1000000) + return p.MatchDigits(t, d.Digits, int(d.Exp), d.NumFracDigits()), n +} + +func validForms(p *Rules, t language.Tag) (forms []Form) { + offset := p.langToIndex[tagToID(t)] + rules := p.rules[p.index[offset]:p.index[offset+1]] + + forms = append(forms, Other) + last := Other + for _, r := range rules { + if cat := Form(r.cat & formMask); cat != andNext && last != cat { + forms = append(forms, cat) + last = cat + } + } + return forms +} + +func (p *Rules) matchComponents(t language.Tag, n, f, scale int) Form { + return matchPlural(p, tagToID(t), n, f, scale) +} + +// MatchPlural returns the plural form for the given language and plural +// operands (as defined in +// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules): +// +// where +// n absolute value of the source number (integer and decimals) +// input +// i integer digits of n. +// v number of visible fraction digits in n, with trailing zeros. +// w number of visible fraction digits in n, without trailing zeros. +// f visible fractional digits in n, with trailing zeros (f = t * 10^(v-w)) +// t visible fractional digits in n, without trailing zeros. +// +// If any of the operand values is too large to fit in an int, it is okay to +// pass the value modulo 10,000,000. +func (p *Rules) MatchPlural(lang language.Tag, i, v, w, f, t int) Form { + return matchPlural(p, tagToID(lang), i, f, v) +} + +func matchPlural(p *Rules, index compact.ID, n, f, v int) Form { + nMask := p.inclusionMasks[n%maxMod] + // Compute the fMask inline in the rules below, as it is relatively rare. 
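+ // (Each inclusion mask is a uint64 with one bit per rule set: bit setID is
+ // set when the operand value modulo maxMod belongs to that set.)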
+ // fMask := p.inclusionMasks[f%maxMod] + vMask := p.inclusionMasks[v%maxMod] + + // Do the matching + offset := p.langToIndex[index] + rules := p.rules[p.index[offset]:p.index[offset+1]] + for i := 0; i < len(rules); i++ { + rule := rules[i] + setBit := uint64(1 << rule.setID) + var skip bool + switch op := opID(rule.cat >> opShift); op { + case opI: // i = x + skip = n >= numN || nMask&setBit == 0 + + case opI | opNotEqual: // i != x + skip = n < numN && nMask&setBit != 0 + + case opI | opMod: // i % m = x + skip = nMask&setBit == 0 + + case opI | opMod | opNotEqual: // i % m != x + skip = nMask&setBit != 0 + + case opN: // n = x + skip = f != 0 || n >= numN || nMask&setBit == 0 + + case opN | opNotEqual: // n != x + skip = f == 0 && n < numN && nMask&setBit != 0 + + case opN | opMod: // n % m = x + skip = f != 0 || nMask&setBit == 0 + + case opN | opMod | opNotEqual: // n % m != x + skip = f == 0 && nMask&setBit != 0 + + case opF: // f = x + skip = f >= numN || p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opNotEqual: // f != x + skip = f < numN && p.inclusionMasks[f%maxMod]&setBit != 0 + + case opF | opMod: // f % m = x + skip = p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opMod | opNotEqual: // f % m != x + skip = p.inclusionMasks[f%maxMod]&setBit != 0 + + case opV: // v = x + skip = v < numN && vMask&setBit == 0 + + case opV | opNotEqual: // v != x + skip = v < numN && vMask&setBit != 0 + + case opW: // w == 0 + skip = f != 0 + + case opW | opNotEqual: // w != 0 + skip = f == 0 + + // Hard-wired rules that cannot be handled by our algorithm. + + case opBretonM: + skip = f != 0 || n == 0 || n%1000000 != 0 + + case opAzerbaijan00s: + // 100,200,300,400,500,600,700,800,900 + skip = n == 0 || n >= 1000 || n%100 != 0 + + case opItalian800: + skip = (f != 0 || n >= numN || nMask&setBit == 0) && n != 800 + } + if skip { + // advance over AND entries. + for ; i < len(rules) && rules[i].cat&formMask == andNext; i++ { + } + continue + } + // return if we have a final entry. + if cat := rule.cat & formMask; cat != andNext { + return Form(cat) + } + } + return Other +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/vendor/golang.org/x/text/feature/plural/tables.go b/vendor/golang.org/x/text/feature/plural/tables.go new file mode 100644 index 00000000..b06b9cb4 --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/tables.go @@ -0,0 +1,552 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package plural + +// CLDRVersion is the CLDR version from which the tables in this package are derived. 
+const CLDRVersion = "32" + +var ordinalRules = []pluralCheck{ // 64 elements + 0: {cat: 0x2f, setID: 0x4}, + 1: {cat: 0x3a, setID: 0x5}, + 2: {cat: 0x22, setID: 0x1}, + 3: {cat: 0x22, setID: 0x6}, + 4: {cat: 0x22, setID: 0x7}, + 5: {cat: 0x2f, setID: 0x8}, + 6: {cat: 0x3c, setID: 0x9}, + 7: {cat: 0x2f, setID: 0xa}, + 8: {cat: 0x3c, setID: 0xb}, + 9: {cat: 0x2c, setID: 0xc}, + 10: {cat: 0x24, setID: 0xd}, + 11: {cat: 0x2d, setID: 0xe}, + 12: {cat: 0x2d, setID: 0xf}, + 13: {cat: 0x2f, setID: 0x10}, + 14: {cat: 0x35, setID: 0x3}, + 15: {cat: 0xc5, setID: 0x11}, + 16: {cat: 0x2, setID: 0x1}, + 17: {cat: 0x5, setID: 0x3}, + 18: {cat: 0xd, setID: 0x12}, + 19: {cat: 0x22, setID: 0x1}, + 20: {cat: 0x2f, setID: 0x13}, + 21: {cat: 0x3d, setID: 0x14}, + 22: {cat: 0x2f, setID: 0x15}, + 23: {cat: 0x3a, setID: 0x16}, + 24: {cat: 0x2f, setID: 0x17}, + 25: {cat: 0x3b, setID: 0x18}, + 26: {cat: 0x2f, setID: 0xa}, + 27: {cat: 0x3c, setID: 0xb}, + 28: {cat: 0x22, setID: 0x1}, + 29: {cat: 0x23, setID: 0x19}, + 30: {cat: 0x24, setID: 0x1a}, + 31: {cat: 0x22, setID: 0x1b}, + 32: {cat: 0x23, setID: 0x2}, + 33: {cat: 0x24, setID: 0x1a}, + 34: {cat: 0xf, setID: 0x15}, + 35: {cat: 0x1a, setID: 0x16}, + 36: {cat: 0xf, setID: 0x17}, + 37: {cat: 0x1b, setID: 0x18}, + 38: {cat: 0xf, setID: 0x1c}, + 39: {cat: 0x1d, setID: 0x1d}, + 40: {cat: 0xa, setID: 0x1e}, + 41: {cat: 0xa, setID: 0x1f}, + 42: {cat: 0xc, setID: 0x20}, + 43: {cat: 0xe4, setID: 0x0}, + 44: {cat: 0x5, setID: 0x3}, + 45: {cat: 0xd, setID: 0xe}, + 46: {cat: 0xd, setID: 0x21}, + 47: {cat: 0x22, setID: 0x1}, + 48: {cat: 0x23, setID: 0x19}, + 49: {cat: 0x24, setID: 0x1a}, + 50: {cat: 0x25, setID: 0x22}, + 51: {cat: 0x22, setID: 0x23}, + 52: {cat: 0x23, setID: 0x19}, + 53: {cat: 0x24, setID: 0x1a}, + 54: {cat: 0x25, setID: 0x22}, + 55: {cat: 0x22, setID: 0x24}, + 56: {cat: 0x23, setID: 0x19}, + 57: {cat: 0x24, setID: 0x1a}, + 58: {cat: 0x25, setID: 0x22}, + 59: {cat: 0x21, setID: 0x25}, + 60: {cat: 0x22, setID: 0x1}, + 61: {cat: 0x23, setID: 0x2}, + 62: {cat: 0x24, setID: 0x26}, + 63: {cat: 0x25, setID: 0x27}, +} // Size: 152 bytes + +var ordinalIndex = []uint8{ // 22 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x05, 0x07, 0x09, + 0x0b, 0x0f, 0x10, 0x13, 0x16, 0x1c, 0x1f, 0x22, + 0x28, 0x2f, 0x33, 0x37, 0x3b, 0x40, +} // Size: 46 bytes + +var ordinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, + 0x10, 0x10, 0x10, 0x00, 0x00, 0x05, 0x05, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x12, 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 
0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + // Entry C0 - FF + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x09, 0x09, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x0d, 0x0d, 0x02, 0x02, 0x02, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x13, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x0b, 0x0b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x07, 0x07, 0x02, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x0c, +} // Size: 799 bytes + +var ordinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000002000010009, 0x00000018482000d3, 0x0000000042840195, 0x000000410a040581, + 0x00000041040c0081, 0x0000009840040041, 0x0000008400045001, 0x0000003850040001, + 0x0000003850060001, 0x0000003800049001, 0x0000000800052001, 0x0000000040660031, + 0x0000000041840331, 0x0000000100040f01, 0x00000001001c0001, 0x0000000040040001, + 0x0000000000045001, 0x0000000070040001, 0x0000000070040001, 0x0000000000049001, + 0x0000000080050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000000010001, 0x0000000040200011, + // Entry 20 - 3F + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + // Entry 40 - 5F + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000080070001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000200010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + // Entry 60 - 7F + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, +} // Size: 824 bytes + +// Slots used for ordinal: 40 of 0xFF rules; 16 of 0xFF indexes; 40 of 64 sets + +var cardinalRules = []pluralCheck{ // 166 elements + 0: {cat: 0x2, setID: 0x3}, + 1: {cat: 0x22, setID: 0x1}, + 2: {cat: 0x2, setID: 0x4}, + 3: {cat: 0x2, setID: 0x4}, + 4: {cat: 0x7, setID: 0x1}, + 5: {cat: 0x62, setID: 0x3}, + 6: {cat: 0x22, setID: 0x4}, + 7: {cat: 0x7, setID: 0x3}, + 8: {cat: 0x42, setID: 0x1}, + 9: {cat: 0x22, setID: 0x4}, + 10: {cat: 0x22, setID: 0x4}, + 11: {cat: 0x22, setID: 0x5}, + 12: {cat: 0x22, setID: 0x1}, + 13: {cat: 0x22, setID: 0x1}, + 14: {cat: 0x7, setID: 0x4}, + 15: {cat: 0x92, setID: 0x3}, + 16: {cat: 0xf, setID: 0x6}, + 17: {cat: 0x1f, setID: 0x7}, + 18: {cat: 0x82, setID: 0x3}, + 19: {cat: 0x92, setID: 0x3}, + 20: {cat: 0xf, setID: 0x6}, + 21: {cat: 0x62, setID: 0x3}, + 22: {cat: 0x4a, setID: 0x6}, + 23: {cat: 0x7, setID: 0x8}, + 24: {cat: 0x62, setID: 0x3}, + 25: {cat: 0x1f, setID: 0x9}, + 26: {cat: 0x62, setID: 0x3}, + 27: {cat: 
0x5f, setID: 0x9}, + 28: {cat: 0x72, setID: 0x3}, + 29: {cat: 0x29, setID: 0xa}, + 30: {cat: 0x29, setID: 0xb}, + 31: {cat: 0x4f, setID: 0xb}, + 32: {cat: 0x61, setID: 0x2}, + 33: {cat: 0x2f, setID: 0x6}, + 34: {cat: 0x3a, setID: 0x7}, + 35: {cat: 0x4f, setID: 0x6}, + 36: {cat: 0x5f, setID: 0x7}, + 37: {cat: 0x62, setID: 0x2}, + 38: {cat: 0x4f, setID: 0x6}, + 39: {cat: 0x72, setID: 0x2}, + 40: {cat: 0x21, setID: 0x3}, + 41: {cat: 0x7, setID: 0x4}, + 42: {cat: 0x32, setID: 0x3}, + 43: {cat: 0x21, setID: 0x3}, + 44: {cat: 0x22, setID: 0x1}, + 45: {cat: 0x22, setID: 0x1}, + 46: {cat: 0x23, setID: 0x2}, + 47: {cat: 0x2, setID: 0x3}, + 48: {cat: 0x22, setID: 0x1}, + 49: {cat: 0x24, setID: 0xc}, + 50: {cat: 0x7, setID: 0x1}, + 51: {cat: 0x62, setID: 0x3}, + 52: {cat: 0x74, setID: 0x3}, + 53: {cat: 0x24, setID: 0x3}, + 54: {cat: 0x2f, setID: 0xd}, + 55: {cat: 0x34, setID: 0x1}, + 56: {cat: 0xf, setID: 0x6}, + 57: {cat: 0x1f, setID: 0x7}, + 58: {cat: 0x62, setID: 0x3}, + 59: {cat: 0x4f, setID: 0x6}, + 60: {cat: 0x5a, setID: 0x7}, + 61: {cat: 0xf, setID: 0xe}, + 62: {cat: 0x1f, setID: 0xf}, + 63: {cat: 0x64, setID: 0x3}, + 64: {cat: 0x4f, setID: 0xe}, + 65: {cat: 0x5c, setID: 0xf}, + 66: {cat: 0x22, setID: 0x10}, + 67: {cat: 0x23, setID: 0x11}, + 68: {cat: 0x24, setID: 0x12}, + 69: {cat: 0xf, setID: 0x1}, + 70: {cat: 0x62, setID: 0x3}, + 71: {cat: 0xf, setID: 0x2}, + 72: {cat: 0x63, setID: 0x3}, + 73: {cat: 0xf, setID: 0x13}, + 74: {cat: 0x64, setID: 0x3}, + 75: {cat: 0x74, setID: 0x3}, + 76: {cat: 0xf, setID: 0x1}, + 77: {cat: 0x62, setID: 0x3}, + 78: {cat: 0x4a, setID: 0x1}, + 79: {cat: 0xf, setID: 0x2}, + 80: {cat: 0x63, setID: 0x3}, + 81: {cat: 0x4b, setID: 0x2}, + 82: {cat: 0xf, setID: 0x13}, + 83: {cat: 0x64, setID: 0x3}, + 84: {cat: 0x4c, setID: 0x13}, + 85: {cat: 0x7, setID: 0x1}, + 86: {cat: 0x62, setID: 0x3}, + 87: {cat: 0x7, setID: 0x2}, + 88: {cat: 0x63, setID: 0x3}, + 89: {cat: 0x2f, setID: 0xa}, + 90: {cat: 0x37, setID: 0x14}, + 91: {cat: 0x65, setID: 0x3}, + 92: {cat: 0x7, setID: 0x1}, + 93: {cat: 0x62, setID: 0x3}, + 94: {cat: 0x7, setID: 0x15}, + 95: {cat: 0x64, setID: 0x3}, + 96: {cat: 0x75, setID: 0x3}, + 97: {cat: 0x7, setID: 0x1}, + 98: {cat: 0x62, setID: 0x3}, + 99: {cat: 0xf, setID: 0xe}, + 100: {cat: 0x1f, setID: 0xf}, + 101: {cat: 0x64, setID: 0x3}, + 102: {cat: 0xf, setID: 0x16}, + 103: {cat: 0x17, setID: 0x1}, + 104: {cat: 0x65, setID: 0x3}, + 105: {cat: 0xf, setID: 0x17}, + 106: {cat: 0x65, setID: 0x3}, + 107: {cat: 0xf, setID: 0xf}, + 108: {cat: 0x65, setID: 0x3}, + 109: {cat: 0x2f, setID: 0x6}, + 110: {cat: 0x3a, setID: 0x7}, + 111: {cat: 0x2f, setID: 0xe}, + 112: {cat: 0x3c, setID: 0xf}, + 113: {cat: 0x2d, setID: 0xa}, + 114: {cat: 0x2d, setID: 0x17}, + 115: {cat: 0x2d, setID: 0x18}, + 116: {cat: 0x2f, setID: 0x6}, + 117: {cat: 0x3a, setID: 0xb}, + 118: {cat: 0x2f, setID: 0x19}, + 119: {cat: 0x3c, setID: 0xb}, + 120: {cat: 0x55, setID: 0x3}, + 121: {cat: 0x22, setID: 0x1}, + 122: {cat: 0x24, setID: 0x3}, + 123: {cat: 0x2c, setID: 0xc}, + 124: {cat: 0x2d, setID: 0xb}, + 125: {cat: 0xf, setID: 0x6}, + 126: {cat: 0x1f, setID: 0x7}, + 127: {cat: 0x62, setID: 0x3}, + 128: {cat: 0xf, setID: 0xe}, + 129: {cat: 0x1f, setID: 0xf}, + 130: {cat: 0x64, setID: 0x3}, + 131: {cat: 0xf, setID: 0xa}, + 132: {cat: 0x65, setID: 0x3}, + 133: {cat: 0xf, setID: 0x17}, + 134: {cat: 0x65, setID: 0x3}, + 135: {cat: 0xf, setID: 0x18}, + 136: {cat: 0x65, setID: 0x3}, + 137: {cat: 0x2f, setID: 0x6}, + 138: {cat: 0x3a, setID: 0x1a}, + 139: {cat: 0x2f, setID: 0x1b}, + 140: {cat: 0x3b, setID: 
0x1c}, + 141: {cat: 0x2f, setID: 0x1d}, + 142: {cat: 0x3c, setID: 0x1e}, + 143: {cat: 0x37, setID: 0x3}, + 144: {cat: 0xa5, setID: 0x0}, + 145: {cat: 0x22, setID: 0x1}, + 146: {cat: 0x23, setID: 0x2}, + 147: {cat: 0x24, setID: 0x1f}, + 148: {cat: 0x25, setID: 0x20}, + 149: {cat: 0xf, setID: 0x6}, + 150: {cat: 0x62, setID: 0x3}, + 151: {cat: 0xf, setID: 0x1b}, + 152: {cat: 0x63, setID: 0x3}, + 153: {cat: 0xf, setID: 0x21}, + 154: {cat: 0x64, setID: 0x3}, + 155: {cat: 0x75, setID: 0x3}, + 156: {cat: 0x21, setID: 0x3}, + 157: {cat: 0x22, setID: 0x1}, + 158: {cat: 0x23, setID: 0x2}, + 159: {cat: 0x2c, setID: 0x22}, + 160: {cat: 0x2d, setID: 0x5}, + 161: {cat: 0x21, setID: 0x3}, + 162: {cat: 0x22, setID: 0x1}, + 163: {cat: 0x23, setID: 0x2}, + 164: {cat: 0x24, setID: 0x23}, + 165: {cat: 0x25, setID: 0x24}, +} // Size: 356 bytes + +var cardinalIndex = []uint8{ // 36 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x06, 0x09, 0x0a, + 0x0c, 0x0d, 0x10, 0x14, 0x17, 0x1d, 0x28, 0x2b, + 0x2d, 0x2f, 0x32, 0x38, 0x42, 0x45, 0x4c, 0x55, + 0x5c, 0x61, 0x6d, 0x74, 0x79, 0x7d, 0x89, 0x91, + 0x95, 0x9c, 0xa1, 0xa6, +} // Size: 60 bytes + +var cardinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x06, 0x06, + 0x01, 0x01, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x01, 0x01, 0x08, 0x08, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x1a, 0x1a, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x00, 0x00, + // Entry 40 - 7F + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x1e, 0x1e, + 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x18, 0x18, 0x00, 0x00, 0x22, 0x22, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x00, 0x00, 0x16, 0x16, 0x00, + 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + // Entry 100 - 13F + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x08, 0x08, 0x00, 0x00, 0x01, 0x01, 0x01, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x0c, 0x0c, + 0x08, 0x08, 0x08, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x08, 0x08, 0x04, 0x04, 0x1f, 0x1f, + 0x14, 0x14, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, + 0x01, 0x01, 0x06, 0x00, 0x00, 0x20, 0x20, 0x08, + 
0x08, 0x08, 0x08, 0x08, 0x08, 0x17, 0x17, 0x01, + 0x01, 0x13, 0x13, 0x13, 0x16, 0x16, 0x08, 0x08, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x04, 0x0a, 0x0a, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x10, 0x17, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x04, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x02, + 0x02, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x0f, 0x0f, 0x08, 0x10, + // Entry 1C0 - 1FF + 0x10, 0x08, 0x08, 0x0e, 0x0e, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x0d, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, + 0x00, 0x00, 0x08, 0x08, 0x0b, 0x0b, 0x08, 0x08, + 0x08, 0x08, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x08, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x08, + 0x06, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x08, 0x19, 0x19, 0x0d, 0x0d, + 0x08, 0x08, 0x03, 0x04, 0x03, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x12, + 0x12, 0x12, 0x08, 0x08, 0x1d, 0x1d, 0x1d, 0x1d, + 0x1d, 0x1d, 0x1d, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x13, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x05, 0x05, 0x18, 0x18, 0x15, 0x15, 0x10, 0x10, + // Entry 280 - 2BF + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x13, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x06, + 0x08, 0x08, 0x08, 0x0c, 0x08, 0x00, 0x00, 0x08, + // Entry 2C0 - 2FF + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x07, + 0x07, 0x08, 0x08, 0x1d, 0x1d, 0x04, 0x04, 0x04, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, + 0x08, 0x08, 0x08, 0x06, 0x08, 0x08, 0x00, 0x00, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x01, 0x01, 0x04, 0x04, +} // Size: 799 bytes + +var cardinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000000200500419, 0x0000000000512153, 0x000000000a327105, 0x0000000ca23c7101, + 0x00000004a23c7201, 0x0000000482943001, 0x0000001482943201, 0x0000000502943001, + 0x0000000502943001, 0x0000000522943201, 0x0000000540543401, 0x00000000454128e1, + 0x000000005b02e821, 0x000000006304e821, 0x000000006304ea21, 0x0000000042842821, + 0x0000000042842a21, 0x0000000042842821, 0x0000000042842821, 0x0000000062842a21, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061, + // Entry 20 - 3F + 0x000000000a004021, 0x0000000022004021, 
0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061, + 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + // Entry 40 - 5F + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + // Entry 60 - 7F + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, +} // Size: 824 bytes + +// Slots used for cardinal: A6 of 0xFF rules; 24 of 0xFF indexes; 37 of 64 sets + +// Total table size 3860 bytes (3KiB); checksum: AAFBF21 diff --git a/vendor/golang.org/x/text/internal/catmsg/catmsg.go b/vendor/golang.org/x/text/internal/catmsg/catmsg.go new file mode 100644 index 00000000..1b257a7b --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/catmsg.go @@ -0,0 +1,417 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catmsg contains support types for package x/text/message/catalog. +// +// This package contains the low-level implementations of Message used by the +// catalog package and provides primitives for other packages to implement their +// own. For instance, the plural package provides functionality for selecting +// translation strings based on the plural category of substitution arguments. +// +// # Encoding and Decoding +// +// Catalogs store Messages encoded as a single string. Compiling a message into +// a string both results in a more compact representation and speeds up evaluation. +// +// A Message must implement a Compile method to convert its arbitrary +// representation to a string. The Compile method takes an Encoder which +// facilitates serializing the message. Encoders also provide more context of +// the message's creation (such as for which language the message is intended), +// which may not be known at the time of the creation of the message. +// +// Each message type must also have an accompanying decoder registered to decode +// the message. This decoder takes a Decoder argument which provides the +// counterparts for the decoding. +// +// # Renderers +// +// A Decoder must be initialized with a Renderer implementation. These +// implementations must be provided by packages that use Catalogs, typically +// formatting packages such as x/text/message. A typical user will not need to +// worry about this type; it is only relevant to packages that do string +// formatting and want to use the catalog package to handle localized strings.
+// +// A package that uses catalogs for selecting strings receives selection results +// as a sequence of substrings passed to the Renderer. The following snippet shows +// how to express the above example using the message package. +// +// message.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", plural.Select(1, "one", "minute")), +// catalog.String("You are %[1]d ${minutes} late.")) +// +// p := message.NewPrinter(language.English) +// p.Printf("You are %d minute(s) late.", 5) // always 5 minutes late. +// +// To evaluate the Printf, package message wraps the arguments in a Renderer +// that is passed to the catalog for message decoding. The call sequence that +// results from evaluating the above message, assuming the person is rather +// tardy, is: +// +// Render("You are %[1]d ") +// Arg(1) +// Render("minutes") +// Render(" late.") +// +// The calls to Arg are caused by the plural.Select execution, which evaluates +// the argument to determine whether the singular or plural message form should +// be selected. The calls to Render report the partial results to the message +// package for further evaluation. +package catmsg + +import ( + "errors" + "fmt" + "strconv" + "strings" + "sync" + + "golang.org/x/text/language" +) + +// A Handle refers to a registered message type. +type Handle int + +// A Handler decodes and evaluates data compiled by a Message and sends the +// result to the Decoder. The output may depend on the value of the substitution +// arguments, accessible by the Decoder's Arg method. The Handler returns false +// if there is no translation for the given substitution arguments. +type Handler func(d *Decoder) bool + +// Register records the existence of a message type and returns a Handle that +// can be used in the Encoder's EncodeMessageType method to create such +// messages. The prefix of the name should be the package path followed by +// an optional disambiguating string. +// Register will panic if a handle for the same name was already registered. +func Register(name string, handler Handler) Handle { + mutex.Lock() + defer mutex.Unlock() + + if _, ok := names[name]; ok { + panic(fmt.Errorf("catmsg: handler for %q already exists", name)) + } + h := Handle(len(handlers)) + names[name] = h + handlers = append(handlers, handler) + return h +} + +// These handlers require fixed positions in the handlers slice. +const ( + msgVars Handle = iota + msgFirst + msgRaw + msgString + msgAffix + // Leave some arbitrary room for future expansion: 20 should suffice. + numInternal = 20 +) + +const prefix = "golang.org/x/text/internal/catmsg." + +var ( + // TODO: find a more stable way to link handles to message types. + mutex sync.Mutex + names = map[string]Handle{ + prefix + "Vars": msgVars, + prefix + "First": msgFirst, + prefix + "Raw": msgRaw, + prefix + "String": msgString, + prefix + "Affix": msgAffix, + } + handlers = make([]Handler, numInternal) +) + +func init() { + // This handler is a message type wrapper that initializes a decoder + // with a variable block. This message type, if present, is always at the + // start of an encoded message. + handlers[msgVars] = func(d *Decoder) bool { + blockSize := int(d.DecodeUint()) + d.vars = d.data[:blockSize] + d.data = d.data[blockSize:] + return d.executeMessage() + } + + // First takes the first message in a sequence that results in a match for + // the given substitution arguments.
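+ // For example (an illustrative sketch; selectMsg stands for any Message
+ // whose evaluation may fail to match): a sequence compiled from
+ // FirstOf{selectMsg, String("fallback")} executes selectMsg first and
+ // renders "fallback" only if the selection reports no match.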
handlers[msgFirst] = func(d *Decoder) bool { + for !d.Done() { + if d.ExecuteMessage() { + return true + } + } + return false + } + + handlers[msgRaw] = func(d *Decoder) bool { + d.Render(d.data) + return true + } + + // A String message alternates between a string constant and a variable + // substitution. + handlers[msgString] = func(d *Decoder) bool { + for !d.Done() { + if str := d.DecodeString(); str != "" { + d.Render(str) + } + if d.Done() { + break + } + d.ExecuteSubstitution() + } + return true + } + + handlers[msgAffix] = func(d *Decoder) bool { + // TODO: use an alternative method for common cases. + prefix := d.DecodeString() + suffix := d.DecodeString() + if prefix != "" { + d.Render(prefix) + } + ret := d.ExecuteMessage() + if suffix != "" { + d.Render(suffix) + } + return ret + } +} + +var ( + // ErrIncomplete indicates a compiled message does not define translations + // for all possible argument values. If this message is returned, evaluating + // a message may result in the ErrNoMatch error. + ErrIncomplete = errors.New("catmsg: incomplete message; may not give result for all inputs") + + // ErrNoMatch indicates no translation message matched the given input + // parameters when evaluating a message. + ErrNoMatch = errors.New("catmsg: no translation for inputs") +) + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message interface { + // Compile encodes the format string(s) of the message as a string for later + // evaluation. + // + // The first call Compile makes on the encoder must be EncodeMessageType. + // The handle passed to this call may either be a handle returned by + // Register to encode a single custom message, or HandleFirst followed by + // a sequence of calls to EncodeMessage. + // + // Compile must return ErrIncomplete if it is possible for evaluation to + // not match any translation for a given set of formatting parameters. + // For example, selecting a translation based on plural form may not yield + // a match if the form "Other" is not one of the selectors. + // + // Compile may return any other application-specific error. For backwards + // compatibility with packages like fmt, which often do not do sanity + // checking of format strings ahead of time, Compile should still make an + // effort to have some sensible fallback in case of an error. + Compile(e *Encoder) error +} + +// Compile converts a Message to a data string that can be stored in a Catalog. +// The resulting string can subsequently be decoded by passing to the Execute +// method of a Decoder. +func Compile(tag language.Tag, macros Dictionary, m Message) (data string, err error) { + // TODO: pass macros so they can be used for validation. + v := &Encoder{inBody: true} // encoder for variables + v.root = v + e := &Encoder{root: v, parent: v, tag: tag} // encoder for messages + err = m.Compile(e) + // This package serves the message package, which in turn is meant to be a + // drop-in replacement for fmt. With the fmt package, format strings are + // evaluated lazily and errors are handled by substituting strings in the + // result, rather than returning an error. Dealing with multiple languages + // makes it more important to check errors ahead of time. We chose to be + // consistent and compatible and allow graceful degradation in case of + // errors. + buf := e.buf[stripPrefix(e.buf):] + if len(v.buf) > 0 { + // Prepend variable block.
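+ // Layout produced by the code below: byte(msgVars), the uvarint-encoded
+ // length of the variable block, the block itself, and then the message
+ // body.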
+ b := make([]byte, 1+maxVarintBytes+len(v.buf)+len(buf)) + b[0] = byte(msgVars) + b = b[:1+encodeUint(b[1:], uint64(len(v.buf)))] + b = append(b, v.buf...) + b = append(b, buf...) + buf = b + } + if err == nil { + err = v.err + } + return string(buf), err +} + +// FirstOf is a message type that prints the first message in the sequence that +// resolves to a match for the given substitution arguments. +type FirstOf []Message + +// Compile implements Message. +func (s FirstOf) Compile(e *Encoder) error { + e.EncodeMessageType(msgFirst) + err := ErrIncomplete + for i, m := range s { + if err == nil { + return fmt.Errorf("catalog: message argument %d is complete and blocks subsequent messages", i-1) + } + err = e.EncodeMessage(m) + } + return err +} + +// Var defines a message that can be substituted for a placeholder of the same +// name. If an expression does not result in a string after evaluation, Name is +// used as the substitution. For example: +// +// Var{ +// Name: "minutes", +// Message: plural.Select(1, "one", "minute"), +// } +// +// will resolve to minute for singular and minutes for plural forms. +type Var struct { + Name string + Message Message +} + +var errIsVar = errors.New("catmsg: variable used as message") + +// Compile implements Message. +// +// Note that this method merely registers a variable; it does not create an +// encoded message. +func (v *Var) Compile(e *Encoder) error { + if err := e.addVar(v.Name, v.Message); err != nil { + return err + } + // Using a Var by itself is an error. If it is in a sequence followed by + // other messages referring to it, this error will be ignored. + return errIsVar +} + +// Raw is a message consisting of a single format string that is passed as is +// to the Renderer. +// +// Note that a Renderer may still do its own variable substitution. +type Raw string + +// Compile implements Message. +func (r Raw) Compile(e *Encoder) (err error) { + e.EncodeMessageType(msgRaw) + // Special case: raw strings don't have a size encoding and so don't use + // EncodeString. + e.buf = append(e.buf, r...) + return nil +} + +// String is a message consisting of a single format string which contains +// placeholders that may be substituted with variables. +// +// Variable substitutions are marked with placeholders and a variable name of +// the form ${name}. Any other substitutions such as Go templates or +// printf-style substitutions are left to be done by the Renderer. +// +// When evaluating a string interpolation, a Renderer will receive separate +// calls for each placeholder and interstitial string. For example, for the +// message: "%[1]v ${invites} %[2]v to ${their} party." The sequence of calls +// is: +// +// d.Render("%[1]v ") +// d.Arg(1) +// d.Render(resultOfInvites) +// d.Render(" %[2]v to ") +// d.Arg(2) +// d.Render(resultOfTheir) +// d.Render(" party.") +// +// where the messages for "invites" and "their" both use a plural.Select +// referring to the first argument. +// +// Strings may also invoke macros. Macros are essentially variables that can be +// reused. Macros may, for instance, be used to make selections between +// different conjugations of a verb. See the catalog package description for an +// overview of macros. +type String string + +// Compile implements Message. It parses the placeholder formats and returns +// any error.
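+// For example (an illustrative sketch): compiling "${minutes} remaining"
+// emits the msgString header, an empty leading literal, a substitution for
+// the variable "minutes", and the trailing literal " remaining".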
+func (s String) Compile(e *Encoder) (err error) { + msg := string(s) + const subStart = "${" + hasHeader := false + p := 0 + b := []byte{} + for { + i := strings.Index(msg[p:], subStart) + if i == -1 { + break + } + b = append(b, msg[p:p+i]...) + p += i + len(subStart) + if i = strings.IndexByte(msg[p:], '}'); i == -1 { + b = append(b, "$!(MISSINGBRACE)"...) + err = fmt.Errorf("catmsg: missing '}'") + p = len(msg) + break + } + name := strings.TrimSpace(msg[p : p+i]) + if q := strings.IndexByte(name, '('); q == -1 { + if !hasHeader { + hasHeader = true + e.EncodeMessageType(msgString) + } + e.EncodeString(string(b)) + e.EncodeSubstitution(name) + b = b[:0] + } else if j := strings.IndexByte(name[q:], ')'); j == -1 { + // TODO: what should the error be? + b = append(b, "$!(MISSINGPAREN)"...) + err = fmt.Errorf("catmsg: missing ')'") + } else if x, sErr := strconv.ParseUint(strings.TrimSpace(name[q+1:q+j]), 10, 32); sErr != nil { + // TODO: handle more than one argument + b = append(b, "$!(BADNUM)"...) + err = fmt.Errorf("catmsg: invalid number %q", strings.TrimSpace(name[q+1:q+j])) + } else { + if !hasHeader { + hasHeader = true + e.EncodeMessageType(msgString) + } + e.EncodeString(string(b)) + e.EncodeSubstitution(name[:q], int(x)) + b = b[:0] + } + p += i + 1 + } + b = append(b, msg[p:]...) + if !hasHeader { + // Simplify string to a raw string. + Raw(string(b)).Compile(e) + } else if len(b) > 0 { + e.EncodeString(string(b)) + } + return err +} + +// Affix is a message that adds a prefix and suffix to another message. +// This is mostly used to add back whitespace to a translation that was stripped +// before sending it out. +type Affix struct { + Message Message + Prefix string + Suffix string +} + +// Compile implements Message. +func (a Affix) Compile(e *Encoder) (err error) { + // TODO: consider adding a special message type that just adds a single + // return. This is probably common enough to handle the majority of cases. + // Get some stats first, though. + e.EncodeMessageType(msgAffix) + e.EncodeString(a.Prefix) + e.EncodeString(a.Suffix) + e.EncodeMessage(a.Message) + return nil +} diff --git a/vendor/golang.org/x/text/internal/catmsg/codec.go b/vendor/golang.org/x/text/internal/catmsg/codec.go new file mode 100644 index 00000000..547802b0 --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/codec.go @@ -0,0 +1,407 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catmsg + +import ( + "errors" + "fmt" + + "golang.org/x/text/language" +) + +// A Renderer renders a Message. +type Renderer interface { + // Render renders the given string. The given string may be interpreted as a + // format string, such as the one used by the fmt package or a template. + Render(s string) + + // Arg returns the i-th argument passed to format a message. This method + // should return nil if there is no such argument. Messages need access to + // arguments to allow selecting a message based on linguistic features of + // those arguments. + Arg(i int) interface{} +} + +// A Dictionary specifies a source of messages, including variables or macros. +type Dictionary interface { + // Lookup returns the message for the given key. It returns false for ok if + // such a message could not be found. + Lookup(key string) (data string, ok bool) + + // TODO: consider returning an interface, instead of a string. This will + // allow implementations to do their own message type decoding.
+} + +// An Encoder serializes a Message to a string. +type Encoder struct { + // The root encoder is used for storing encoded variables. + root *Encoder + // The parent encoder provides the surrounding scopes for resolving variable + // names. + parent *Encoder + + tag language.Tag + + // buf holds the encoded message so far. After a message completes encoding, + // the contents of buf, prefixed by the encoded length, are flushed to the + // parent buffer. + buf []byte + + // vars is the lookup table of variables in the current scope. + vars []keyVal + + err error + inBody bool // if false next call must be EncodeMessageType +} + +type keyVal struct { + key string + offset int +} + +// Language reports the language for which the encoded message will be stored +// in the Catalog. +func (e *Encoder) Language() language.Tag { return e.tag } + +func (e *Encoder) setError(err error) { + if e.root.err == nil { + e.root.err = err + } +} + +// EncodeUint encodes x. +func (e *Encoder) EncodeUint(x uint64) { + e.checkInBody() + var buf [maxVarintBytes]byte + n := encodeUint(buf[:], x) + e.buf = append(e.buf, buf[:n]...) +} + +// EncodeString encodes s. +func (e *Encoder) EncodeString(s string) { + e.checkInBody() + e.EncodeUint(uint64(len(s))) + e.buf = append(e.buf, s...) +} + +// EncodeMessageType marks the current message to be of type h. +// +// It must be the first call of a Message's Compile method. +func (e *Encoder) EncodeMessageType(h Handle) { + if e.inBody { + panic("catmsg: EncodeMessageType not the first method called") + } + e.inBody = true + e.EncodeUint(uint64(h)) +} + +// EncodeMessage serializes the given message inline at the current position. +func (e *Encoder) EncodeMessage(m Message) error { + e = &Encoder{root: e.root, parent: e, tag: e.tag} + err := m.Compile(e) + if _, ok := m.(*Var); !ok { + e.flushTo(e.parent) + } + return err +} + +func (e *Encoder) checkInBody() { + if !e.inBody { + panic("catmsg: expected prior call to EncodeMessageType") + } +} + +// stripPrefix indicates the number of prefix bytes that must be stripped to +// turn a single-element sequence into a message that is just this single member +// without its size prefix. If the message can be stripped, b[1:n] contains the +// size prefix. +func stripPrefix(b []byte) (n int) { + if len(b) > 0 && Handle(b[0]) == msgFirst { + x, n, _ := decodeUint(b[1:]) + if 1+n+int(x) == len(b) { + return 1 + n + } + } + return 0 +} + +func (e *Encoder) flushTo(dst *Encoder) { + data := e.buf + p := stripPrefix(data) + if p > 0 { + data = data[1:] + } else { + // Prefix the size. + dst.EncodeUint(uint64(len(data))) + } + dst.buf = append(dst.buf, data...) +} + +func (e *Encoder) addVar(key string, m Message) error { + for _, v := range e.parent.vars { + if v.key == key { + err := fmt.Errorf("catmsg: duplicate variable %q", key) + e.setError(err) + return err + } + } + scope := e.parent + // If a variable message is Incomplete, and does not evaluate to a message + // during execution, we fall back to the variable name. We encode this by + // appending the variable name if the message reports it's incomplete. + + err := m.Compile(e) + if err != ErrIncomplete { + e.setError(err) + } + switch { + case len(e.buf) == 1 && Handle(e.buf[0]) == msgFirst: // empty sequence + e.buf = e.buf[:0] + e.inBody = false + fallthrough + case len(e.buf) == 0: + // Empty message. 
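+ // Fall back to the variable name itself, e.g. an empty "minutes"
+ // variable renders literally as "minutes".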
+ if err := String(key).Compile(e); err != nil { + e.setError(err) + } + case err == ErrIncomplete: + if Handle(e.buf[0]) != msgFirst { + seq := &Encoder{root: e.root, parent: e} + seq.EncodeMessageType(msgFirst) + e.flushTo(seq) + e = seq + } + // e contains a sequence; append the fallback string. + e.EncodeMessage(String(key)) + } + + // Flush result to variable heap. + offset := len(e.root.buf) + e.flushTo(e.root) + e.buf = e.buf[:0] + + // Record variable offset in current scope. + scope.vars = append(scope.vars, keyVal{key: key, offset: offset}) + return err +} + +const ( + substituteVar = iota + substituteMacro + substituteError +) + +// EncodeSubstitution inserts a resolved reference to a variable or macro. +// +// This call must be matched with a call to ExecuteSubstitution at decoding +// time. +func (e *Encoder) EncodeSubstitution(name string, arguments ...int) { + if arity := len(arguments); arity > 0 { + // TODO: also resolve macros. + e.EncodeUint(substituteMacro) + e.EncodeString(name) + for _, a := range arguments { + e.EncodeUint(uint64(a)) + } + return + } + for scope := e; scope != nil; scope = scope.parent { + for _, v := range scope.vars { + if v.key != name { + continue + } + e.EncodeUint(substituteVar) // TODO: support arity > 0 + e.EncodeUint(uint64(v.offset)) + return + } + } + // TODO: refer to dictionary-wide scoped variables. + e.EncodeUint(substituteError) + e.EncodeString(name) + e.setError(fmt.Errorf("catmsg: unknown var %q", name)) +} + +// A Decoder deserializes and evaluates messages that are encoded by an encoder. +type Decoder struct { + tag language.Tag + dst Renderer + macros Dictionary + + err error + vars string + data string + + macroArg int // TODO: allow more than one argument +} + +// NewDecoder returns a new Decoder. +// +// Decoders are designed to be reused for multiple invocations of Execute. +// Only one goroutine may call Execute concurrently. +func NewDecoder(tag language.Tag, r Renderer, macros Dictionary) *Decoder { + return &Decoder{ + tag: tag, + dst: r, + macros: macros, + } +} + +func (d *Decoder) setError(err error) { + if d.err == nil { + d.err = err + } +} + +// Language returns the language in which the message is being rendered. +// +// The destination language may be a child language of the language used for +// encoding. For instance, a decoding language of "pt-PT" is consistent with an +// encoding language of "pt". +func (d *Decoder) Language() language.Tag { return d.tag } + +// Done reports whether there are more bytes to process in this message. +func (d *Decoder) Done() bool { return len(d.data) == 0 } + +// Render implements Renderer. +func (d *Decoder) Render(s string) { d.dst.Render(s) } + +// Arg implements Renderer. +// +// During evaluation of macros, the argument positions may be mapped to +// arguments that differ from the original call. +func (d *Decoder) Arg(i int) interface{} { + if d.macroArg != 0 { + if i != 1 { + panic("catmsg: only macros with single argument supported") + } + i = d.macroArg + } + return d.dst.Arg(i) +} + +// DecodeUint decodes a number that was encoded with EncodeUint and advances the +// position. +func (d *Decoder) DecodeUint() uint64 { + x, n, err := decodeUintString(d.data) + d.data = d.data[n:] + if err != nil { + d.setError(err) + } + return x +} + +// DecodeString decodes a string that was encoded with EncodeString and advances +// the position. 
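+// Strings are length-prefixed; for example, "abc" is stored as the
+// uvarint 3 followed by the bytes 'a', 'b', 'c'.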
+func (d *Decoder) DecodeString() string { + size := d.DecodeUint() + s := d.data[:size] + d.data = d.data[size:] + return s +} + +// SkipMessage skips the message at the current location and advances the +// position. +func (d *Decoder) SkipMessage() { + n := int(d.DecodeUint()) + d.data = d.data[n:] +} + +// Execute decodes and evaluates msg. +// +// Only one goroutine may call execute. +func (d *Decoder) Execute(msg string) error { + d.err = nil + if !d.execute(msg) { + return ErrNoMatch + } + return d.err +} + +func (d *Decoder) execute(msg string) bool { + saved := d.data + d.data = msg + ok := d.executeMessage() + d.data = saved + return ok +} + +// executeMessageFromData is like execute, but also decodes a leading message +// size and clips the given string accordingly. +// +// It reports the number of bytes consumed and whether a message was selected. +func (d *Decoder) executeMessageFromData(s string) (n int, ok bool) { + saved := d.data + d.data = s + size := int(d.DecodeUint()) + n = len(s) - len(d.data) + // Sanitize the setting. This allows skipping a size argument for + // RawString and method Done. + d.data = d.data[:size] + ok = d.executeMessage() + n += size - len(d.data) + d.data = saved + return n, ok +} + +var errUnknownHandler = errors.New("catmsg: string contains unsupported handler") + +// executeMessage reads the handle id, initializes the decoder and executes the +// message. It is assumed that all of d.data[d.p:] is the single message. +func (d *Decoder) executeMessage() bool { + if d.Done() { + // We interpret no data as a valid empty message. + return true + } + handle := d.DecodeUint() + + var fn Handler + mutex.Lock() + if int(handle) < len(handlers) { + fn = handlers[handle] + } + mutex.Unlock() + if fn == nil { + d.setError(errUnknownHandler) + d.execute(fmt.Sprintf("\x02$!(UNKNOWNMSGHANDLER=%#x)", handle)) + return true + } + return fn(d) +} + +// ExecuteMessage decodes and executes the message at the current position. +func (d *Decoder) ExecuteMessage() bool { + n, ok := d.executeMessageFromData(d.data) + d.data = d.data[n:] + return ok +} + +// ExecuteSubstitution executes the message corresponding to the substitution +// as encoded by EncodeSubstitution. +func (d *Decoder) ExecuteSubstitution() { + switch x := d.DecodeUint(); x { + case substituteVar: + offset := d.DecodeUint() + d.executeMessageFromData(d.vars[offset:]) + case substituteMacro: + name := d.DecodeString() + data, ok := d.macros.Lookup(name) + old := d.macroArg + // TODO: support macros of arity other than 1. + d.macroArg = int(d.DecodeUint()) + switch { + case !ok: + // TODO: detect this at creation time. + d.setError(fmt.Errorf("catmsg: undefined macro %q", name)) + fallthrough + case !d.execute(data): + d.dst.Render(name) // fall back to macro name. + } + d.macroArg = old + case substituteError: + d.dst.Render(d.DecodeString()) + default: + panic("catmsg: unreachable") + } +} diff --git a/vendor/golang.org/x/text/internal/catmsg/varint.go b/vendor/golang.org/x/text/internal/catmsg/varint.go new file mode 100644 index 00000000..a2cee2cf --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/varint.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catmsg + +// This file implements varint encoding analogous to the one in encoding/binary. 
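+// For example, 300 (binary 100101100) is encoded low 7-bit group first,
+// with the high bit marking continuation, as the two bytes 0xAC, 0x02.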
+// We need a string version of this function, so we add that here and then add +// the rest for consistency. + +import "errors" + +var ( + errIllegalVarint = errors.New("catmsg: illegal varint") + errVarintTooLarge = errors.New("catmsg: varint too large for uint64") +) + +const maxVarintBytes = 10 // maximum length of a varint + +// encodeUint encodes x as a variable-sized integer into buf and returns the +// number of bytes written. buf must be at least maxVarintBytes long. +func encodeUint(buf []byte, x uint64) (n int) { + for ; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return n +} + +func decodeUintString(s string) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(s) { + return 0, i, errIllegalVarint + } + b := uint64(s[i]) + i++ + x |= (b & 0x7F) << shift + if b&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} + +func decodeUint(b []byte) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(b) { + return 0, i, errIllegalVarint + } + c := uint64(b[i]) + i++ + x |= (c & 0x7F) << shift + if c&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} diff --git a/vendor/golang.org/x/text/internal/format/format.go b/vendor/golang.org/x/text/internal/format/format.go new file mode 100644 index 00000000..ee1c57a3 --- /dev/null +++ b/vendor/golang.org/x/text/internal/format/format.go @@ -0,0 +1,41 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package format contains types for defining language-specific formatting of +// values. +// +// This package is internal now, but will eventually be exposed after the API +// settles. +package format // import "golang.org/x/text/internal/format" + +import ( + "fmt" + + "golang.org/x/text/language" +) + +// State represents the printer state passed to custom formatters. It provides +// access to the fmt.State interface and the sentence and language-related +// context. +type State interface { + fmt.State + + // Language reports the requested language in which to render a message. + Language() language.Tag + + // TODO: consider this and removing rune from the Format method in the + // Formatter interface. + // + // Verb returns the format variant to render, analogous to the types used + // in fmt. Use 'v' for the default or only variant. + // Verb() rune + + // TODO: more info: + // - sentence context such as linguistic features passed by the translator. +} + +// Formatter is analogous to fmt.Formatter. +type Formatter interface { + Format(state State, verb rune) +} diff --git a/vendor/golang.org/x/text/internal/format/parser.go b/vendor/golang.org/x/text/internal/format/parser.go new file mode 100644 index 00000000..855aed71 --- /dev/null +++ b/vendor/golang.org/x/text/internal/format/parser.go @@ -0,0 +1,358 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package format + +import ( + "reflect" + "unicode/utf8" +) + +// A Parser parses a format string. The results from the parse are set in the +// struct fields.
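+// For instance, scanning the directive "%6.2f" sets Verb to 'f', Width to 6,
+// and Prec to 2, with WidthPresent and PrecPresent both true.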
+type Parser struct { + Verb rune + + WidthPresent bool + PrecPresent bool + Minus bool + Plus bool + Sharp bool + Space bool + Zero bool + + // For the formats %+v %#v, we set the plusV/sharpV flags + // and clear the plus/sharp flags since %+v and %#v are in effect + // different, flagless formats set at the top level. + PlusV bool + SharpV bool + + HasIndex bool + + Width int + Prec int // precision + + // retain arguments across calls. + Args []interface{} + // retain current argument number across calls + ArgNum int + + // reordered records whether the format string used argument reordering. + Reordered bool + // goodArgNum records whether the most recent reordering directive was valid. + goodArgNum bool + + // position info + format string + startPos int + endPos int + Status Status +} + +// Reset initializes a parser to scan format strings for the given args. +func (p *Parser) Reset(args []interface{}) { + p.Args = args + p.ArgNum = 0 + p.startPos = 0 + p.Reordered = false +} + +// Text returns the part of the format string that was parsed by the last call +// to Scan. It returns the original substitution clause if the current scan +// parsed a substitution. +func (p *Parser) Text() string { return p.format[p.startPos:p.endPos] } + +// SetFormat sets a new format string to parse. It does not reset the argument +// count. +func (p *Parser) SetFormat(format string) { + p.format = format + p.startPos = 0 + p.endPos = 0 +} + +// Status indicates the result type of a call to Scan. +type Status int + +const ( + StatusText Status = iota + StatusSubstitution + StatusBadWidthSubstitution + StatusBadPrecSubstitution + StatusNoVerb + StatusBadArgNum + StatusMissingArg +) + +// ClearFlags resets the parser to default behavior. +func (p *Parser) ClearFlags() { + p.WidthPresent = false + p.PrecPresent = false + p.Minus = false + p.Plus = false + p.Sharp = false + p.Space = false + p.Zero = false + + p.PlusV = false + p.SharpV = false + + p.HasIndex = false +} + +// Scan scans the next part of the format string and sets the status to +// indicate whether it scanned a string literal, substitution or error. +func (p *Parser) Scan() bool { + p.Status = StatusText + format := p.format + end := len(format) + if p.endPos >= end { + return false + } + afterIndex := false // previous item in format was an index like [3]. + + p.startPos = p.endPos + p.goodArgNum = true + i := p.startPos + for i < end && format[i] != '%' { + i++ + } + if i > p.startPos { + p.endPos = i + return true + } + // Process one verb + i++ + + p.Status = StatusSubstitution + + // Do we have flags? + p.ClearFlags() + +simpleFormat: + for ; i < end; i++ { + c := p.format[i] + switch c { + case '#': + p.Sharp = true + case '0': + p.Zero = !p.Minus // Only allow zero padding to the left. + case '+': + p.Plus = true + case '-': + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + case ' ': + p.Space = true + default: + // Fast path for common case of ascii lower case simple verbs + // without precision or width or argument indices. + if 'a' <= c && c <= 'z' && p.ArgNum < len(p.Args) { + if c == 'v' { + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + } + p.Verb = rune(c) + p.ArgNum++ + p.endPos = i + 1 + return true + } + // Format is more complex than simple flags and a verb or is malformed. + break simpleFormat + } + } + + // Do we have an explicit argument index? + i, afterIndex = p.updateArgNumber(format, i) + + // Do we have width?
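+ // A '*' takes the width from the argument list; e.g. "%*d" with
+ // arguments (6, 42) yields width 6 for printing 42.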
+ if i < end && format[i] == '*' { + i++ + p.Width, p.WidthPresent = p.intFromArg() + + if !p.WidthPresent { + p.Status = StatusBadWidthSubstitution + } + + // We have a negative width, so take its value and ensure + // that the minus flag is set + if p.Width < 0 { + p.Width = -p.Width + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + } + afterIndex = false + } else { + p.Width, p.WidthPresent, i = parsenum(format, i, end) + if afterIndex && p.WidthPresent { // "%[3]2d" + p.goodArgNum = false + } + } + + // Do we have precision? + if i+1 < end && format[i] == '.' { + i++ + if afterIndex { // "%[3].2d" + p.goodArgNum = false + } + i, afterIndex = p.updateArgNumber(format, i) + if i < end && format[i] == '*' { + i++ + p.Prec, p.PrecPresent = p.intFromArg() + // Negative precision arguments don't make sense + if p.Prec < 0 { + p.Prec = 0 + p.PrecPresent = false + } + if !p.PrecPresent { + p.Status = StatusBadPrecSubstitution + } + afterIndex = false + } else { + p.Prec, p.PrecPresent, i = parsenum(format, i, end) + if !p.PrecPresent { + p.Prec = 0 + p.PrecPresent = true + } + } + } + + if !afterIndex { + i, afterIndex = p.updateArgNumber(format, i) + } + p.HasIndex = afterIndex + + if i >= end { + p.endPos = i + p.Status = StatusNoVerb + return true + } + + verb, w := utf8.DecodeRuneInString(format[i:]) + p.endPos = i + w + p.Verb = verb + + switch { + case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec. + p.startPos = p.endPos - 1 + p.Status = StatusText + case !p.goodArgNum: + p.Status = StatusBadArgNum + case p.ArgNum >= len(p.Args): // No argument left over to print for the current verb. + p.Status = StatusMissingArg + p.ArgNum++ + case verb == 'v': + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + fallthrough + default: + p.ArgNum++ + } + return true +} + +// intFromArg gets the ArgNumth element of Args. On return, isInt reports +// whether the argument has integer type. +func (p *Parser) intFromArg() (num int, isInt bool) { + if p.ArgNum < len(p.Args) { + arg := p.Args[p.ArgNum] + num, isInt = arg.(int) // Almost always OK. + if !isInt { + // Work harder. + switch v := reflect.ValueOf(arg); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := v.Int() + if int64(int(n)) == n { + num = int(n) + isInt = true + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := v.Uint() + if int64(n) >= 0 && uint64(int(n)) == n { + num = int(n) + isInt = true + } + default: + // Already 0, false. + } + } + p.ArgNum++ + if tooLarge(num) { + num = 0 + isInt = false + } + } + return +} + +// parseArgNumber returns the value of the bracketed number, minus 1 +// (explicit argument numbers are one-indexed but we want zero-indexed). +// The opening bracket is known to be present at format[0]. +// The returned values are the index, the number of bytes to consume +// up to the closing paren, if present, and whether the number parsed +// ok. The bytes to consume will be 1 if no closing paren is present. +func parseArgNumber(format string) (index int, wid int, ok bool) { + // There must be at least 3 bytes: [n]. + if len(format) < 3 { + return 0, 1, false + } + + // Find closing bracket. 
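+ // e.g. for "[3]d" the number parses as 3 and the function returns
+ // index 2 (zero-indexed), with wid 3 covering the bytes "[3]".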
+ for i := 1; i < len(format); i++ { + if format[i] == ']' { + width, ok, newi := parsenum(format, 1, i) + if !ok || newi != i { + return 0, i + 1, false + } + return width - 1, i + 1, true // arg numbers are one-indexed and skip paren. + } + } + return 0, 1, false +} + +// updateArgNumber returns the next argument to evaluate, which is either the value of the passed-in +// argNum or the value of the bracketed integer that begins format[i:]. It also returns +// the new value of i, that is, the index of the next byte of the format to process. +func (p *Parser) updateArgNumber(format string, i int) (newi int, found bool) { + if len(format) <= i || format[i] != '[' { + return i, false + } + p.Reordered = true + index, wid, ok := parseArgNumber(format[i:]) + if ok && 0 <= index && index < len(p.Args) { + p.ArgNum = index + return i + wid, true + } + p.goodArgNum = false + return i + wid, ok +} + +// tooLarge reports whether the magnitude of the integer is +// too large to be used as a formatting width or precision. +func tooLarge(x int) bool { + const max int = 1e6 + return x > max || x < -max +} + +// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number present. +func parsenum(s string, start, end int) (num int, isnum bool, newi int) { + if start >= end { + return 0, false, end + } + for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ { + if tooLarge(num) { + return 0, false, end // Overflow; crazy long number most likely. + } + num = num*10 + int(s[newi]-'0') + isnum = true + } + return +} diff --git a/vendor/golang.org/x/text/internal/internal.go b/vendor/golang.org/x/text/internal/internal.go new file mode 100644 index 00000000..3cddbbdd --- /dev/null +++ b/vendor/golang.org/x/text/internal/internal.go @@ -0,0 +1,49 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains non-exported functionality that is used by +// packages in the text repository. +package internal // import "golang.org/x/text/internal" + +import ( + "sort" + + "golang.org/x/text/language" +) + +// SortTags sorts tags in place. +func SortTags(tags []language.Tag) { + sort.Sort(sorter(tags)) +} + +type sorter []language.Tag + +func (s sorter) Len() int { + return len(s) +} + +func (s sorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s sorter) Less(i, j int) bool { + return s[i].String() < s[j].String() +} + +// UniqueTags sorts and filters duplicate tags in place and returns a slice with +// only unique tags. +func UniqueTags(tags []language.Tag) []language.Tag { + if len(tags) <= 1 { + return tags + } + SortTags(tags) + k := 0 + for i := 1; i < len(tags); i++ { + if tags[k].String() < tags[i].String() { + k++ + tags[k] = tags[i] + } + } + return tags[:k+1] +} diff --git a/vendor/golang.org/x/text/internal/match.go b/vendor/golang.org/x/text/internal/match.go new file mode 100644 index 00000000..1cc004a6 --- /dev/null +++ b/vendor/golang.org/x/text/internal/match.go @@ -0,0 +1,67 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// This file contains matchers that implement CLDR inheritance. +// +// See https://unicode.org/reports/tr35/#Locale_Inheritance. +// +// Some of the inheritance described in this document is already handled by +// the cldr package.
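+// +// For example, a matcher built from the tags [en, pt] resolves a request for
+// pt-BR to pt with confidence High, by walking the parent chain pt-BR -> pt.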
+ +import ( + "golang.org/x/text/language" +) + +// TODO: consider if (some of the) matching algorithm needs to be public after +// getting some feel about what is generic and what is specific. + +// NewInheritanceMatcher returns a matcher that matches based on the inheritance +// chain. +// +// The matcher uses canonicalization and the parent relationship to find a +// match. The resulting match will always be either Und or a language with the +// same language and script as the requested language. It will not match +// languages for which there is understood to be mutual or one-directional +// intelligibility. +// +// A Match will indicate an Exact match if the language matches after +// canonicalization and High if the matched tag is a parent. +func NewInheritanceMatcher(t []language.Tag) *InheritanceMatcher { + tags := &InheritanceMatcher{make(map[language.Tag]int)} + for i, tag := range t { + ct, err := language.All.Canonicalize(tag) + if err != nil { + ct = tag + } + tags.index[ct] = i + } + return tags +} + +type InheritanceMatcher struct { + index map[language.Tag]int +} + +func (m InheritanceMatcher) Match(want ...language.Tag) (language.Tag, int, language.Confidence) { + for _, t := range want { + ct, err := language.All.Canonicalize(t) + if err != nil { + ct = t + } + conf := language.Exact + for { + if index, ok := m.index[ct]; ok { + return ct, index, conf + } + if ct == language.Und { + break + } + ct = ct.Parent() + conf = language.High + } + } + return language.Und, 0, language.No +} diff --git a/vendor/golang.org/x/text/internal/number/common.go b/vendor/golang.org/x/text/internal/number/common.go new file mode 100644 index 00000000..a6e9c8e0 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/common.go @@ -0,0 +1,55 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" +) + +// A system identifies a CLDR numbering system. +type system byte + +type systemData struct { + id system + digitSize byte // number of UTF-8 bytes per digit + zero [utf8.UTFMax]byte // UTF-8 sequence of zero digit. +} + +// A SymbolType identifies a symbol of a specific kind. +type SymbolType int + +const ( + SymDecimal SymbolType = iota + SymGroup + SymList + SymPercentSign + SymPlusSign + SymMinusSign + SymExponential + SymSuperscriptingExponent + SymPerMille + SymInfinity + SymNan + SymTimeSeparator + + NumSymbolTypes +) + +const hasNonLatnMask = 0x8000 + +// symOffset is an offset into altSymData if the bit indicated by hasNonLatnMask +// is not 0 (with this bit masked out), and an offset into symIndex otherwise. +// +// TODO: this type can be a byte again if we use an indirection into altsymData +// and introduce an alt -> offset slice (the length of this will be number of +// alternatives plus 1). This also allows getting rid of the compactTag field +// in altSymData. In total this will save about 1K. +type symOffset uint16 + +type altSymData struct { + compactTag compact.ID + symIndex symOffset + system system +} diff --git a/vendor/golang.org/x/text/internal/number/decimal.go b/vendor/golang.org/x/text/internal/number/decimal.go new file mode 100644 index 00000000..e128cf34 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/decimal.go @@ -0,0 +1,500 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate stringer -type RoundingMode + +package number + +import ( + "math" + "strconv" +) + +// RoundingMode determines how a number is rounded to the desired precision. +type RoundingMode byte + +const ( + ToNearestEven RoundingMode = iota // towards the nearest integer, or towards an even number if equidistant. + ToNearestZero // towards the nearest integer, or towards zero if equidistant. + ToNearestAway // towards the nearest integer, or away from zero if equidistant. + ToPositiveInf // towards infinity + ToNegativeInf // towards negative infinity + ToZero // towards zero + AwayFromZero // away from zero + numModes +) + +const maxIntDigits = 20 + +// A Decimal represents a floating point number in decimal format. +// Digits represents a number [0, 1.0), and the absolute value represented by +// Decimal is Digits * 10^Exp. Leading and trailing zeros may be omitted and Exp +// may point outside a valid position in Digits. +// +// Examples: +// +// Number Decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 +// 12000 Digits: [1, 2], Exp: 5 +// 12000.00 Digits: [1, 2], Exp: 5 +// 0.00123 Digits: [1, 2, 3], Exp: -2 +// 0 Digits: [], Exp: 0 +type Decimal struct { + digits + + buf [maxIntDigits]byte +} + +type digits struct { + Digits []byte // mantissa digits, big-endian + Exp int32 // exponent + Neg bool + Inf bool // Takes precedence over Digits and Exp. + NaN bool // Takes precedence over Inf. +} + +// Digits represents a floating point number represented in digits of the +// base in which a number is to be displayed. It is similar to Decimal, but +// keeps track of trailing fraction zeros and the comma placement for +// engineering notation. Digits must have at least one digit. +// +// Examples: +// +// Number Decimal +// decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 End: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 End: 5 +// 12000 Digits: [1, 2], Exp: 5 End: 5 +// 12000.00 Digits: [1, 2], Exp: 5 End: 7 +// 0.00123 Digits: [1, 2, 3], Exp: -2 End: 3 +// 0 Digits: [], Exp: 0 End: 1 +// scientific (actual exp is Exp - Comma) +// 0e0 Digits: [0], Exp: 1, End: 1, Comma: 1 +// .0e0 Digits: [0], Exp: 0, End: 1, Comma: 0 +// 0.0e0 Digits: [0], Exp: 1, End: 2, Comma: 1 +// 1.23e4 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 1 +// .123e5 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 0 +// engineering +// 12.3e3 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 2 +type Digits struct { + digits + // End indicates the end position of the number. + End int32 // For decimals Exp <= End. For scientific len(Digits) <= End. + // Comma is used for the comma position for scientific (always 0 or 1) and + // engineering notation (always 0, 1, 2, or 3). + Comma uint8 + // IsScientific indicates whether this number is to be rendered as a + // scientific number. + IsScientific bool +} + +func (d *Digits) NumFracDigits() int { + if d.Exp >= d.End { + return 0 + } + return int(d.End - d.Exp) +} + +// normalize returns a new Decimal with leading and trailing zeros removed. +func (d *Decimal) normalize() (n Decimal) { + n = *d + b := n.Digits + // Strip leading zeros. Resulting number of digits is significant digits. 
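+ // e.g. Digits [0, 0, 1, 2] with Exp 3 (the value 1.2) normalizes to
+ // Digits [1, 2] with Exp 1.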
+ for len(b) > 0 && b[0] == 0 { + b = b[1:] + n.Exp-- + } + // Strip trailing zeros + for len(b) > 0 && b[len(b)-1] == 0 { + b = b[:len(b)-1] + } + if len(b) == 0 { + n.Exp = 0 + } + n.Digits = b + return n +} + +func (d *Decimal) clear() { + b := d.Digits + if b == nil { + b = d.buf[:0] + } + *d = Decimal{} + d.Digits = b[:0] +} + +func (x *Decimal) String() string { + if x.NaN { + return "NaN" + } + var buf []byte + if x.Neg { + buf = append(buf, '-') + } + if x.Inf { + buf = append(buf, "Inf"...) + return string(buf) + } + switch { + case len(x.Digits) == 0: + buf = append(buf, '0') + case x.Exp <= 0: + // 0.00ddd + buf = append(buf, "0."...) + buf = appendZeros(buf, -int(x.Exp)) + buf = appendDigits(buf, x.Digits) + + case /* 0 < */ int(x.Exp) < len(x.Digits): + // dd.ddd + buf = appendDigits(buf, x.Digits[:x.Exp]) + buf = append(buf, '.') + buf = appendDigits(buf, x.Digits[x.Exp:]) + + default: // len(x.Digits) <= x.Exp + // ddd00 + buf = appendDigits(buf, x.Digits) + buf = appendZeros(buf, int(x.Exp)-len(x.Digits)) + } + return string(buf) +} + +func appendDigits(buf []byte, digits []byte) []byte { + for _, c := range digits { + buf = append(buf, c+'0') + } + return buf +} + +// appendZeros appends n 0 digits to buf and returns buf. +func appendZeros(buf []byte, n int) []byte { + for ; n > 0; n-- { + buf = append(buf, '0') + } + return buf +} + +func (d *digits) round(mode RoundingMode, n int) { + if n >= len(d.Digits) { + return + } + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. + inc := false + switch mode { + case ToNegativeInf: + inc = d.Neg + case ToPositiveInf: + inc = !d.Neg + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && + (len(d.Digits) > n+1 || n == 0 || d.Digits[n-1]&1 != 0) + case ToNearestAway: + inc = d.Digits[n] >= 5 + case ToNearestZero: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && len(d.Digits) > n+1 + default: + panic("unreachable") + } + if inc { + d.roundUp(n) + } else { + d.roundDown(n) + } +} + +// roundFloat rounds a floating point number. +func (r RoundingMode) roundFloat(x float64) float64 { + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. 
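+ // Illustrative examples (annotation, not part of the upstream file):
+ // under ToNearestEven, 2.5 rounds to 2 (the integer part is even) while
+ // 3.5 rounds to 4; under ToZero, both are simply truncated to 2 and 3.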
+ abs := x + if x < 0 { + abs = -x + } + i, f := math.Modf(abs) + if f == 0.0 { + return x + } + inc := false + switch r { + case ToNegativeInf: + inc = x < 0 + case ToPositiveInf: + inc = x >= 0 + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + // TODO: check overflow + inc = f > 0.5 || f == 0.5 && int64(i)&1 != 0 + case ToNearestAway: + inc = f >= 0.5 + case ToNearestZero: + inc = f > 0.5 + default: + panic("unreachable") + } + if inc { + i += 1 + } + if abs != x { + i = -i + } + return i +} + +func (x *digits) roundUp(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + // find first digit < 9 + for n > 0 && x.Digits[n-1] >= 9 { + n-- + } + + if n == 0 { + // all digits are 9s => round up to 1 and update exponent + x.Digits[0] = 1 // ok since len(x.Digits) > n + x.Digits = x.Digits[:1] + x.Exp++ + return + } + x.Digits[n-1]++ + x.Digits = x.Digits[:n] + // x already trimmed +} + +func (x *digits) roundDown(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + x.Digits = x.Digits[:n] + trim(x) +} + +// trim cuts off any trailing zeros from x's mantissa; +// they are meaningless for the value of x. +func trim(x *digits) { + i := len(x.Digits) + for i > 0 && x.Digits[i-1] == 0 { + i-- + } + x.Digits = x.Digits[:i] + if i == 0 { + x.Exp = 0 + } +} + +// A Converter converts a number into decimals according to the given rounding +// criteria. +type Converter interface { + Convert(d *Decimal, r RoundingContext) +} + +const ( + signed = true + unsigned = false +) + +// Convert converts the given number to the decimal representation using the +// supplied RoundingContext. +func (d *Decimal) Convert(r RoundingContext, number interface{}) { + switch f := number.(type) { + case Converter: + d.clear() + f.Convert(d, r) + case float32: + d.ConvertFloat(r, float64(f), 32) + case float64: + d.ConvertFloat(r, f, 64) + case int: + d.ConvertInt(r, signed, uint64(f)) + case int8: + d.ConvertInt(r, signed, uint64(f)) + case int16: + d.ConvertInt(r, signed, uint64(f)) + case int32: + d.ConvertInt(r, signed, uint64(f)) + case int64: + d.ConvertInt(r, signed, uint64(f)) + case uint: + d.ConvertInt(r, unsigned, uint64(f)) + case uint8: + d.ConvertInt(r, unsigned, uint64(f)) + case uint16: + d.ConvertInt(r, unsigned, uint64(f)) + case uint32: + d.ConvertInt(r, unsigned, uint64(f)) + case uint64: + d.ConvertInt(r, unsigned, f) + + default: + d.NaN = true + // TODO: + // case string: if produced by strconv, allows for easy arbitrary pos. + // case reflect.Value: + // case big.Float + // case big.Int + // case big.Rat? + // catch underlyings using reflect or will this already be done by the + // message package? + } +} + +// ConvertInt converts an integer to decimals. +func (d *Decimal) ConvertInt(r RoundingContext, signed bool, x uint64) { + if r.Increment > 0 { + // TODO: if uint64 is too large, fall back to float64 + if signed { + d.ConvertFloat(r, float64(int64(x)), 64) + } else { + d.ConvertFloat(r, float64(x), 64) + } + return + } + d.clear() + if signed && int64(x) < 0 { + x = uint64(-int64(x)) + d.Neg = true + } + d.fillIntDigits(x) + d.Exp = int32(len(d.Digits)) +} + +// ConvertFloat converts a floating point number to decimals. 
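+//
+// Illustrative example (annotation, not part of the upstream comment): with
+// a RoundingContext whose MaxFractionDigits is -1 (unlimited) and no rounding
+// otherwise requested, ConvertFloat(r, 12.345, 64) should yield
+// Digits: [1, 2, 3, 4, 5], Exp: 2, i.e. 0.12345 * 10^2.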
+func (d *Decimal) ConvertFloat(r RoundingContext, x float64, size int) { + d.clear() + if math.IsNaN(x) { + d.NaN = true + return + } + // Simple case: decimal notation + if r.Increment > 0 { + scale := int(r.IncrementScale) + mult := 1.0 + if scale >= len(scales) { + mult = math.Pow(10, float64(scale)) + } else { + mult = scales[scale] + } + // We multiply x instead of dividing inc as it gives less rounding + // issues. + x *= mult + x /= float64(r.Increment) + x = r.Mode.roundFloat(x) + x *= float64(r.Increment) + x /= mult + } + + abs := x + if x < 0 { + d.Neg = true + abs = -x + } + if math.IsInf(abs, 1) { + d.Inf = true + return + } + + // By default we get the exact decimal representation. + verb := byte('g') + prec := -1 + // As the strconv API does not return the rounding accuracy, we can only + // round using ToNearestEven. + if r.Mode == ToNearestEven { + if n := r.RoundSignificantDigits(); n >= 0 { + prec = n + } else if n = r.RoundFractionDigits(); n >= 0 { + prec = n + verb = 'f' + } + } else { + // TODO: At this point strconv's rounding is imprecise to the point that + // it is not usable for this purpose. + // See https://github.com/golang/go/issues/21714 + // If rounding is requested, we ask for a large number of digits and + // round from there to simulate rounding only once. + // Ideally we would have strconv export an AppendDigits that would take + // a rounding mode and/or return an accuracy. Something like this would + // work: + // AppendDigits(dst []byte, x float64, base, size, prec int) (digits []byte, exp, accuracy int) + hasPrec := r.RoundSignificantDigits() >= 0 + hasScale := r.RoundFractionDigits() >= 0 + if hasPrec || hasScale { + // prec is the number of mantissa bits plus some extra for safety. + // We need at least the number of mantissa bits as decimals to + // accurately represent the floating point without rounding, as each + // bit requires one more decimal to represent: 0.5, 0.25, 0.125, ... + prec = 60 + } + } + + b := strconv.AppendFloat(d.Digits[:0], abs, verb, prec, size) + i := 0 + k := 0 + beforeDot := 1 + for i < len(b) { + if c := b[i]; '0' <= c && c <= '9' { + b[k] = c - '0' + k++ + d.Exp += int32(beforeDot) + } else if c == '.' { + beforeDot = 0 + d.Exp = int32(k) + } else { + break + } + i++ + } + d.Digits = b[:k] + if i != len(b) { + i += len("e") + pSign := i + exp := 0 + for i++; i < len(b); i++ { + exp *= 10 + exp += int(b[i] - '0') + } + if b[pSign] == '-' { + exp = -exp + } + d.Exp = int32(exp) + 1 + } +} + +func (d *Decimal) fillIntDigits(x uint64) { + if cap(d.Digits) < maxIntDigits { + d.Digits = d.buf[:] + } else { + d.Digits = d.buf[:maxIntDigits] + } + i := 0 + for ; x > 0; x /= 10 { + d.Digits[i] = byte(x % 10) + i++ + } + d.Digits = d.Digits[:i] + for p := 0; p < i; p++ { + i-- + d.Digits[p], d.Digits[i] = d.Digits[i], d.Digits[p] + } +} + +var scales [70]float64 + +func init() { + x := 1.0 + for i := range scales { + scales[i] = x + x *= 10 + } +} diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go new file mode 100644 index 00000000..cd94c5dc --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/format.go @@ -0,0 +1,535 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package number + +import ( + "strconv" + "unicode/utf8" + + "golang.org/x/text/language" +) + +// TODO: +// - grouping of fractions +// - allow user-defined superscript notation (such as 4) +// - same for non-breaking spaces, like   + +// A VisibleDigits computes digits, comma placement and trailing zeros as they +// will be shown to the user. +type VisibleDigits interface { + Digits(buf []byte, t language.Tag, scale int) Digits + // TODO: Do we also need to add the verb or pass a format.State? +} + +// Formatting proceeds along the following lines: +// 0) Compose rounding information from format and context. +// 1) Convert a number into a Decimal. +// 2) Sanitize Decimal by adding trailing zeros, removing leading digits, and +// (non-increment) rounding. The Decimal that results from this is suitable +// for determining the plural form. +// 3) Render the Decimal in the localized form. + +// Formatter contains all the information needed to render a number. +type Formatter struct { + Pattern + Info +} + +func (f *Formatter) init(t language.Tag, index []uint8) { + f.Info = InfoFromTag(t) + f.Pattern = formats[index[tagToID(t)]] +} + +// InitPattern initializes a Formatter for the given Pattern. +func (f *Formatter) InitPattern(t language.Tag, pat *Pattern) { + f.Info = InfoFromTag(t) + f.Pattern = *pat +} + +// InitDecimal initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitDecimal(t language.Tag) { + f.init(t, tagToDecimal) +} + +// InitScientific initializes a Formatter using the default Pattern for the +// given language. +func (f *Formatter) InitScientific(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 +} + +// InitEngineering initializes a Formatter using the default Pattern for the +// given language. +func (f *Formatter) InitEngineering(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 + f.Pattern.MaxIntegerDigits = 3 + f.Pattern.MinIntegerDigits = 1 +} + +// InitPercent initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitPercent(t language.Tag) { + f.init(t, tagToPercent) +} + +// InitPerMille initializes a Formatter using the default Pattern for the given +// language. 
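+//
+// Illustrative note (annotation, not part of the upstream comment): the
+// per-mille formatter reuses the percent pattern but shifts three decimal
+// places instead of two, so a value of 0.125 should come out as 125 followed
+// by the locale's per-mille sign (see the '%' handling in appendAffix).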
+func (f *Formatter) InitPerMille(t language.Tag) { + f.init(t, tagToPercent) + f.Pattern.DigitShift = 3 +} + +func (f *Formatter) Append(dst []byte, x interface{}) []byte { + var d Decimal + r := f.RoundingContext + d.Convert(r, x) + return f.Render(dst, FormatDigits(&d, r)) +} + +func FormatDigits(d *Decimal, r RoundingContext) Digits { + if r.isScientific() { + return scientificVisibleDigits(r, d) + } + return decimalVisibleDigits(r, d) +} + +func (f *Formatter) Format(dst []byte, d *Decimal) []byte { + return f.Render(dst, FormatDigits(d, f.RoundingContext)) +} + +func (f *Formatter) Render(dst []byte, d Digits) []byte { + var result []byte + var postPrefix, preSuffix int + if d.IsScientific { + result, postPrefix, preSuffix = appendScientific(dst, f, &d) + } else { + result, postPrefix, preSuffix = appendDecimal(dst, f, &d) + } + if f.PadRune == 0 { + return result + } + width := int(f.FormatWidth) + if count := utf8.RuneCount(result); count < width { + insertPos := 0 + switch f.Flags & PadMask { + case PadAfterPrefix: + insertPos = postPrefix + case PadBeforeSuffix: + insertPos = preSuffix + case PadAfterSuffix: + insertPos = len(result) + } + num := width - count + pad := [utf8.UTFMax]byte{' '} + sz := 1 + if r := f.PadRune; r != 0 { + sz = utf8.EncodeRune(pad[:], r) + } + extra := sz * num + if n := len(result) + extra; n < cap(result) { + result = result[:n] + copy(result[insertPos+extra:], result[insertPos:]) + } else { + buf := make([]byte, n) + copy(buf, result[:insertPos]) + copy(buf[insertPos+extra:], result[insertPos:]) + result = buf + } + for ; num > 0; num-- { + insertPos += copy(result[insertPos:], pad[:sz]) + } + } + return result +} + +// decimalVisibleDigits converts d according to the RoundingContext. Note that +// the exponent may change as a result of this operation. +func decimalVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits} + + exp := n.Exp + exp += int32(r.DigitShift) + + // Cap integer digits. Remove *most-significant* digits. + if r.MaxIntegerDigits > 0 { + if p := int(exp) - int(r.MaxIntegerDigits); p > 0 { + if p > len(n.Digits) { + p = len(n.Digits) + } + if n.Digits = n.Digits[p:]; len(n.Digits) == 0 { + exp = 0 + } else { + exp -= int32(p) + } + // Strip leading zeros. + for len(n.Digits) > 0 && n.Digits[0] == 0 { + n.Digits = n.Digits[1:] + exp-- + } + } + } + + // Rounding if not already done by Convert. + p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 { + if cap := int(exp) + maxFrac; cap < p { + p = int(exp) + maxFrac + } + if p < 0 { + p = 0 + } + } + n.round(r.Mode, p) + + // set End (trailing zeros) + n.End = int32(len(n.Digits)) + if n.End == 0 { + exp = 0 + if r.MinFractionDigits > 0 { + n.End = int32(r.MinFractionDigits) + } + if p := int32(r.MinSignificantDigits) - 1; p > n.End { + n.End = p + } + } else { + if end := exp + int32(r.MinFractionDigits); end > n.End { + n.End = end + } + if n.End < int32(r.MinSignificantDigits) { + n.End = int32(r.MinSignificantDigits) + } + } + n.Exp = exp + return n +} + +// appendDecimal appends a formatted number to dst. It returns two possible +// insertion points for padding. 
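+//
+// Illustrative example (annotation, not part of the upstream comment): for a
+// pattern with GroupingSize {3, 0} and Latin symbols, Digits: [1, 2, 3, 4,
+// 5, 6, 7] with Exp: 7 should render as "1,234,567"; postPre points just
+// past the prefix and preSuf just before the suffix, so padding can be
+// slotted in at either point.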
+func appendDecimal(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, len(dst) + } + digits := n.Digits + exp := n.Exp + + // Split in integer and fraction part. + var intDigits, fracDigits []byte + numInt := 0 + numFrac := int(n.End - n.Exp) + if exp > 0 { + numInt = int(exp) + if int(exp) >= len(digits) { // ddddd | ddddd00 + intDigits = digits + } else { // ddd.dd + intDigits = digits[:exp] + fracDigits = digits[exp:] + } + } else { + fracDigits = digits + } + + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + minInt := int(f.MinIntegerDigits) + if minInt == 0 && f.MinSignificantDigits > 0 { + minInt = 1 + } + // add leading zeros + for i := minInt; i > numInt; i-- { + dst = f.AppendDigit(dst, 0) + if f.needsSep(i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) + } + // Add trailing zeros + i = 0 + for n := -int(n.Exp); i < n; i++ { + dst = f.AppendDigit(dst, 0) + } + for _, d := range fracDigits { + i++ + dst = f.AppendDigit(dst, d) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +func scientificVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits, IsScientific: true} + + // Normalize to have at least one digit. This simplifies engineering + // notation. + if len(n.Digits) == 0 { + n.Digits = append(n.Digits, 0) + n.Exp = 1 + } + + // Significant digits are transformed by the parser for scientific notation + // and do not need to be handled here. + maxInt, numInt := int(r.MaxIntegerDigits), int(r.MinIntegerDigits) + if numInt == 0 { + numInt = 1 + } + + // If a maximum number of integers is specified, the minimum must be 1 + // and the exponent is grouped by this number (e.g. for engineering) + if maxInt > numInt { + // Correct the exponent to reflect a single integer digit. + numInt = 1 + // engineering + // 0.01234 ([12345]e-1) -> 1.2345e-2 12.345e-3 + // 12345 ([12345]e+5) -> 1.2345e4 12.345e3 + d := int(n.Exp-1) % maxInt + if d < 0 { + d += maxInt + } + numInt += d + } + + p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 && numInt+maxFrac < p { + p = numInt + maxFrac + } + n.round(r.Mode, p) + + n.Comma = uint8(numInt) + n.End = int32(len(n.Digits)) + if minSig := int32(r.MinFractionDigits) + int32(numInt); n.End < minSig { + n.End = minSig + } + return n +} + +// appendScientific appends a formatted number to dst. It returns two possible +// insertion points for padding. 
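+//
+// Illustrative example (annotation, not part of the upstream comment): in a
+// locale whose exponential symbol is plain "E", the superscripting branch of
+// this function is taken, so 12345 in scientific form should render roughly
+// as "1.2345 × 10⁴", with narrow no-break spaces around the locale's
+// superscripting exponent symbol (typically "×").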
+func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, 0 + } + digits := n.Digits + numInt := int(n.Comma) + numFrac := int(n.End) - int(n.Comma) + + var intDigits, fracDigits []byte + if numInt <= len(digits) { + intDigits = digits[:numInt] + fracDigits = digits[numInt:] + } else { + intDigits = digits + } + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) + } + i = 0 + for ; i < len(fracDigits); i++ { + dst = f.AppendDigit(dst, fracDigits[i]) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + + // exp + buf := [12]byte{} + // TODO: use exponential if superscripting is not available (no Latin + // numbers or no tags) and use exponential in all other cases. + exp := n.Exp - int32(n.Comma) + exponential := f.Symbol(SymExponential) + if exponential == "E" { + dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE + dst = append(dst, f.Symbol(SymSuperscriptingExponent)...) + dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE + dst = f.AppendDigit(dst, 1) + dst = f.AppendDigit(dst, 0) + switch { + case exp < 0: + dst = append(dst, superMinus...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, superPlus...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = append(dst, superDigits[0]...) + } + for _, c := range b { + dst = append(dst, superDigits[c-'0']...) + } + } else { + dst = append(dst, exponential...) + switch { + case exp < 0: + dst = append(dst, f.Symbol(SymMinusSign)...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, f.Symbol(SymPlusSign)...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = f.AppendDigit(dst, 0) + } + for _, c := range b { + dst = f.AppendDigit(dst, c-'0') + } + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +const ( + superMinus = "\u207B" // SUPERSCRIPT HYPHEN-MINUS + superPlus = "\u207A" // SUPERSCRIPT PLUS SIGN +) + +var ( + // Note: the digits are not sequential!!! + superDigits = []string{ + "\u2070", // SUPERSCRIPT DIGIT ZERO + "\u00B9", // SUPERSCRIPT DIGIT ONE + "\u00B2", // SUPERSCRIPT DIGIT TWO + "\u00B3", // SUPERSCRIPT DIGIT THREE + "\u2074", // SUPERSCRIPT DIGIT FOUR + "\u2075", // SUPERSCRIPT DIGIT FIVE + "\u2076", // SUPERSCRIPT DIGIT SIX + "\u2077", // SUPERSCRIPT DIGIT SEVEN + "\u2078", // SUPERSCRIPT DIGIT EIGHT + "\u2079", // SUPERSCRIPT DIGIT NINE + } +) + +func (f *Formatter) getAffixes(neg bool) (affix, suffix string) { + str := f.Affix + if str != "" { + if f.NegOffset > 0 { + if neg { + str = str[f.NegOffset:] + } else { + str = str[:f.NegOffset] + } + } + sufStart := 1 + str[0] + affix = str[1:sufStart] + suffix = str[sufStart+1:] + } + // TODO: introduce a NeedNeg sign to indicate if the left pattern already + // has a sign marked? 
+ if f.NegOffset == 0 && (neg || f.Flags&AlwaysSign != 0) { + affix = "-" + affix + } + return affix, suffix +} + +func (f *Formatter) renderSpecial(dst []byte, d *Digits) (b []byte, ok bool) { + if d.NaN { + return fmtNaN(dst, f), true + } + if d.Inf { + return fmtInfinite(dst, f, d), true + } + return dst, false +} + +func fmtNaN(dst []byte, f *Formatter) []byte { + return append(dst, f.Symbol(SymNan)...) +} + +func fmtInfinite(dst []byte, f *Formatter, d *Digits) []byte { + affix, suffix := f.getAffixes(d.Neg) + dst = appendAffix(dst, f, affix, d.Neg) + dst = append(dst, f.Symbol(SymInfinity)...) + dst = appendAffix(dst, f, suffix, d.Neg) + return dst +} + +func appendAffix(dst []byte, f *Formatter, affix string, neg bool) []byte { + quoting := false + escaping := false + for _, r := range affix { + switch { + case escaping: + // escaping occurs both inside and outside of quotes + dst = append(dst, string(r)...) + escaping = false + case r == '\\': + escaping = true + case r == '\'': + quoting = !quoting + case quoting: + dst = append(dst, string(r)...) + case r == '%': + if f.DigitShift == 3 { + dst = append(dst, f.Symbol(SymPerMille)...) + } else { + dst = append(dst, f.Symbol(SymPercentSign)...) + } + case r == '-' || r == '+': + if neg { + dst = append(dst, f.Symbol(SymMinusSign)...) + } else if f.Flags&ElideSign == 0 { + dst = append(dst, f.Symbol(SymPlusSign)...) + } else { + dst = append(dst, ' ') + } + default: + dst = append(dst, string(r)...) + } + } + return dst +} diff --git a/vendor/golang.org/x/text/internal/number/number.go b/vendor/golang.org/x/text/internal/number/number.go new file mode 100644 index 00000000..e1d933c3 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/number.go @@ -0,0 +1,152 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_common.go + +// Package number contains tools and data for formatting numbers. +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/language" +) + +// Info holds number formatting configuration data. +type Info struct { + system systemData // numbering system information + symIndex symOffset // index to symbols +} + +// InfoFromLangID returns a Info for the given compact language identifier and +// numbering system identifier. If system is the empty string, the default +// numbering system will be taken for that language. +func InfoFromLangID(compactIndex compact.ID, numberSystem string) Info { + p := langToDefaults[compactIndex] + // Lookup the entry for the language. + pSymIndex := symOffset(0) // Default: Latin, default symbols + system, ok := systemMap[numberSystem] + if !ok { + // Take the value for the default numbering system. This is by far the + // most common case as an alternative numbering system is hardly used. + if p&hasNonLatnMask == 0 { // Latn digits. + pSymIndex = p + } else { // Non-Latn or multiple numbering systems. + // Take the first entry from the alternatives list. + data := langToAlt[p&^hasNonLatnMask] + pSymIndex = data.symIndex + system = data.system + } + } else { + langIndex := compactIndex + ns := system + outerLoop: + for ; ; p = langToDefaults[langIndex] { + if p&hasNonLatnMask == 0 { + if ns == 0 { + // The index directly points to the symbol data. + pSymIndex = p + break + } + // Move to the parent and retry. 
+ langIndex = langIndex.Parent() + } else { + // The index points to a list of symbol data indexes. + for _, e := range langToAlt[p&^hasNonLatnMask:] { + if e.compactTag != langIndex { + if langIndex == 0 { + // The CLDR root defines full symbol information for + // all numbering systems (even though mostly by + // means of aliases). Fall back to the default entry + // for Latn if there is no data for the numbering + // system of this language. + if ns == 0 { + break + } + // Fall back to Latin and start from the original + // language. See + // https://unicode.org/reports/tr35/#Locale_Inheritance. + ns = numLatn + langIndex = compactIndex + continue outerLoop + } + // Fall back to parent. + langIndex = langIndex.Parent() + } else if e.system == ns { + pSymIndex = e.symIndex + break outerLoop + } + } + } + } + } + if int(system) >= len(numSysData) { // algorithmic + // Will generate ASCII digits in case the user inadvertently calls + // WriteDigit or Digit on it. + d := numSysData[0] + d.id = system + return Info{ + system: d, + symIndex: pSymIndex, + } + } + return Info{ + system: numSysData[system], + symIndex: pSymIndex, + } +} + +// InfoFromTag returns a Info for the given language tag. +func InfoFromTag(t language.Tag) Info { + return InfoFromLangID(tagToID(t), t.TypeForKey("nu")) +} + +// IsDecimal reports if the numbering system can convert decimal to native +// symbols one-to-one. +func (n Info) IsDecimal() bool { + return int(n.system.id) < len(numSysData) +} + +// WriteDigit writes the UTF-8 sequence for n corresponding to the given ASCII +// digit to dst and reports the number of bytes written. dst must be large +// enough to hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) WriteDigit(dst []byte, asciiDigit rune) int { + copy(dst, n.system.zero[:n.system.digitSize]) + dst[n.system.digitSize-1] += byte(asciiDigit - '0') + return int(n.system.digitSize) +} + +// AppendDigit appends the UTF-8 sequence for n corresponding to the given digit +// to dst and reports the number of bytes written. dst must be large enough to +// hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) AppendDigit(dst []byte, digit byte) []byte { + dst = append(dst, n.system.zero[:n.system.digitSize]...) + dst[len(dst)-1] += digit + return dst +} + +// Digit returns the digit for the numbering system for the corresponding ASCII +// value. For example, ni.Digit('3') could return '三'. Note that the argument +// is the rune constant '3', which equals 51, not the integer constant 3. +func (n Info) Digit(asciiDigit rune) rune { + var x [utf8.UTFMax]byte + n.WriteDigit(x[:], asciiDigit) + r, _ := utf8.DecodeRune(x[:]) + return r +} + +// Symbol returns the string for the given symbol type. +func (n Info) Symbol(t SymbolType) string { + return symData.Elem(int(symIndex[n.symIndex][t])) +} + +func formatForLang(t language.Tag, index []byte) *Pattern { + return &formats[index[tagToID(t)]] +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/vendor/golang.org/x/text/internal/number/pattern.go b/vendor/golang.org/x/text/internal/number/pattern.go new file mode 100644 index 00000000..06e59559 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/pattern.go @@ -0,0 +1,485 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package number + +import ( + "errors" + "unicode/utf8" +) + +// This file contains a parser for the CLDR number patterns as described in +// https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. +// +// The following BNF is derived from this standard. +// +// pattern := subpattern (';' subpattern)? +// subpattern := affix? number exponent? affix? +// number := decimal | sigDigits +// decimal := '#'* '0'* ('.' fraction)? | '#' | '0' +// fraction := '0'* '#'* +// sigDigits := '#'* '@' '@'* '#'* +// exponent := 'E' '+'? '0'* '0' +// padSpec := '*' \L +// +// Notes: +// - An affix pattern may contain any runes, but runes with special meaning +// should be escaped. +// - Sequences of digits, '#', and '@' in decimal and sigDigits may have +// interstitial commas. + +// TODO: replace special characters in affixes (-, +, ¤) with control codes. + +// Pattern holds information for formatting numbers. It is designed to hold +// information from CLDR number patterns. +// +// This pattern is precompiled for all patterns for all languages. Even though +// the number of patterns is not very large, we want to keep this small. +// +// This type is only intended for internal use. +type Pattern struct { + RoundingContext + + Affix string // includes prefix and suffix. First byte is prefix length. + Offset uint16 // Offset into Affix for prefix and suffix + NegOffset uint16 // Offset into Affix for negative prefix and suffix or 0. + PadRune rune + FormatWidth uint16 + + GroupingSize [2]uint8 + Flags PatternFlag +} + +// A RoundingContext indicates how a number should be converted to digits. +// It contains all information needed to determine the "visible digits" as +// required by the pluralization rules. +type RoundingContext struct { + // TODO: unify these two fields so that there is a more unambiguous meaning + // of how precision is handled. + MaxSignificantDigits int16 // -1 is unlimited + MaxFractionDigits int16 // -1 is unlimited + + Increment uint32 + IncrementScale uint8 // May differ from printed scale. + + Mode RoundingMode + + DigitShift uint8 // Number of decimals to shift. Used for % and ‰. + + // Number of digits. + MinIntegerDigits uint8 + + MaxIntegerDigits uint8 + MinFractionDigits uint8 + MinSignificantDigits uint8 + + MinExponentDigits uint8 +} + +// RoundSignificantDigits returns the number of significant digits an +// implementation of Convert may round to or n < 0 if there is no maximum or +// a maximum is not recommended. +func (r *RoundingContext) RoundSignificantDigits() (n int) { + if r.MaxFractionDigits == 0 && r.MaxSignificantDigits > 0 { + return int(r.MaxSignificantDigits) + } else if r.isScientific() && r.MaxIntegerDigits == 1 { + if r.MaxSignificantDigits == 0 || + int(r.MaxFractionDigits+1) == int(r.MaxSignificantDigits) { + // Note: don't add DigitShift: it is only used for decimals. + return int(r.MaxFractionDigits) + 1 + } + } + return -1 +} + +// RoundFractionDigits returns the number of fraction digits an implementation +// of Convert may round to or n < 0 if there is no maximum or a maximum is not +// recommended. +func (r *RoundingContext) RoundFractionDigits() (n int) { + if r.MinExponentDigits == 0 && + r.MaxSignificantDigits == 0 && + r.MaxFractionDigits >= 0 { + return int(r.MaxFractionDigits) + int(r.DigitShift) + } + return -1 +} + +// SetScale fixes the RoundingContext to a fixed number of fraction digits. 
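+//
+// Illustrative example (annotation, not part of the upstream comment): after
+// SetScale(2), converting 3.14159 should yield the visible digits 3.14, and
+// 2.5 should yield 2.50 (the minimum fraction digits pad the trailing zero).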
+func (r *RoundingContext) SetScale(scale int) { + r.MinFractionDigits = uint8(scale) + r.MaxFractionDigits = int16(scale) +} + +func (r *RoundingContext) SetPrecision(prec int) { + r.MaxSignificantDigits = int16(prec) +} + +func (r *RoundingContext) isScientific() bool { + return r.MinExponentDigits > 0 +} + +func (f *Pattern) needsSep(pos int) bool { + p := pos - 1 + size := int(f.GroupingSize[0]) + if size == 0 || p == 0 { + return false + } + if p == size { + return true + } + if p -= size; p < 0 { + return false + } + // TODO: make second groupingsize the same as first if 0 so that we can + // avoid this check. + if x := int(f.GroupingSize[1]); x != 0 { + size = x + } + return p%size == 0 +} + +// A PatternFlag is a bit mask for the flag field of a Pattern. +type PatternFlag uint8 + +const ( + AlwaysSign PatternFlag = 1 << iota + ElideSign // Use space instead of plus sign. AlwaysSign must be true. + AlwaysExpSign + AlwaysDecimalSeparator + ParenthesisForNegative // Common pattern. Saves space. + + PadAfterNumber + PadAfterAffix + + PadBeforePrefix = 0 // Default + PadAfterPrefix = PadAfterAffix + PadBeforeSuffix = PadAfterNumber + PadAfterSuffix = PadAfterNumber | PadAfterAffix + PadMask = PadAfterNumber | PadAfterAffix +) + +type parser struct { + *Pattern + + leadingSharps int + + pos int + err error + doNotTerminate bool + groupingCount uint + hasGroup bool + buf []byte +} + +func (p *parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *parser) updateGrouping() { + if p.hasGroup && + 0 < p.groupingCount && p.groupingCount < 255 { + p.GroupingSize[1] = p.GroupingSize[0] + p.GroupingSize[0] = uint8(p.groupingCount) + } + p.groupingCount = 0 + p.hasGroup = true +} + +var ( + // TODO: more sensible and localizeable error messages. + errMultiplePadSpecifiers = errors.New("format: pattern has multiple pad specifiers") + errInvalidPadSpecifier = errors.New("format: invalid pad specifier") + errInvalidQuote = errors.New("format: invalid quote") + errAffixTooLarge = errors.New("format: prefix or suffix exceeds maximum UTF-8 length of 256 bytes") + errDuplicatePercentSign = errors.New("format: duplicate percent sign") + errDuplicatePermilleSign = errors.New("format: duplicate permille sign") + errUnexpectedEnd = errors.New("format: unexpected end of pattern") +) + +// ParsePattern extracts formatting information from a CLDR number pattern. +// +// See https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. +func ParsePattern(s string) (f *Pattern, err error) { + p := parser{Pattern: &Pattern{}} + + s = p.parseSubPattern(s) + + if s != "" { + // Parse negative sub pattern. + if s[0] != ';' { + p.setError(errors.New("format: error parsing first sub pattern")) + return nil, p.err + } + neg := parser{Pattern: &Pattern{}} // just for extracting the affixes. + s = neg.parseSubPattern(s[len(";"):]) + p.NegOffset = uint16(len(p.buf)) + p.buf = append(p.buf, neg.buf...) + } + if s != "" { + p.setError(errors.New("format: spurious characters at end of pattern")) + } + if p.err != nil { + return nil, p.err + } + if affix := string(p.buf); affix == "\x00\x00" || affix == "\x00\x00\x00\x00" { + // No prefix or suffixes. 
+ p.NegOffset = 0 + } else { + p.Affix = affix + } + if p.Increment == 0 { + p.IncrementScale = 0 + } + return p.Pattern, nil +} + +func (p *parser) parseSubPattern(s string) string { + s = p.parsePad(s, PadBeforePrefix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterPrefix) + + s = p.parse(p.number, s) + p.updateGrouping() + + s = p.parsePad(s, PadBeforeSuffix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterSuffix) + return s +} + +func (p *parser) parsePad(s string, f PatternFlag) (tail string) { + if len(s) >= 2 && s[0] == '*' { + r, sz := utf8.DecodeRuneInString(s[1:]) + if p.PadRune != 0 { + p.err = errMultiplePadSpecifiers + } else { + p.Flags |= f + p.PadRune = r + } + return s[1+sz:] + } + return s +} + +func (p *parser) parseAffix(s string) string { + x := len(p.buf) + p.buf = append(p.buf, 0) // placeholder for affix length + + s = p.parse(p.affix, s) + + n := len(p.buf) - x - 1 + if n > 0xFF { + p.setError(errAffixTooLarge) + } + p.buf[x] = uint8(n) + return s +} + +// state implements a state transition. It returns the new state. A state +// function may set an error on the parser or may simply return on an incorrect +// token and let the next phase fail. +type state func(r rune) state + +// parse repeatedly applies a state function on the given string until a +// termination condition is reached. +func (p *parser) parse(fn state, s string) (tail string) { + for i, r := range s { + p.doNotTerminate = false + if fn = fn(r); fn == nil || p.err != nil { + return s[i:] + } + p.FormatWidth++ + } + if p.doNotTerminate { + p.setError(errUnexpectedEnd) + } + return "" +} + +func (p *parser) affix(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '#', '@', '.', '*', ',', ';': + return nil + case '\'': + p.FormatWidth-- + return p.escapeFirst + case '%': + if p.DigitShift != 0 { + p.setError(errDuplicatePercentSign) + } + p.DigitShift = 2 + case '\u2030': // ‰ Per mille + if p.DigitShift != 0 { + p.setError(errDuplicatePermilleSign) + } + p.DigitShift = 3 + // TODO: handle currency somehow: ¤, ¤¤, ¤¤¤, ¤¤¤¤ + } + p.buf = append(p.buf, string(r)...) + return p.affix +} + +func (p *parser) escapeFirst(r rune) state { + switch r { + case '\'': + p.buf = append(p.buf, "\\'"...) + return p.affix + default: + p.buf = append(p.buf, '\'') + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +func (p *parser) escape(r rune) state { + switch r { + case '\'': + p.FormatWidth-- + p.buf = append(p.buf, '\'') + return p.affix + default: + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +// number parses a number. The BNF says the integer part should always have +// a '0', but that does not appear to be the case according to the rest of the +// documentation. We will allow having only '#' numbers. +func (p *parser) number(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.leadingSharps++ + case '@': + p.groupingCount++ + p.leadingSharps = 0 + p.MaxFractionDigits = -1 + return p.sigDigits(r) + case ',': + if p.leadingSharps == 0 { // no leading commas + return nil + } + p.updateGrouping() + case 'E': + p.MaxIntegerDigits = uint8(p.leadingSharps) + return p.exponent + case '.': // allow ".##" etc. 
+ p.updateGrouping() + return p.fraction + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return p.integer(r) + default: + return nil + } + return p.number +} + +func (p *parser) integer(r rune) state { + if !('0' <= r && r <= '9') { + var next state + switch r { + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + next = p.exponent + case '.': + next = p.fraction + case ',': + next = p.integer + } + p.updateGrouping() + return next + } + p.Increment = p.Increment*10 + uint32(r-'0') + p.groupingCount++ + p.MinIntegerDigits++ + return p.integer +} + +func (p *parser) sigDigits(r rune) state { + switch r { + case '@': + p.groupingCount++ + p.MaxSignificantDigits++ + p.MinSignificantDigits++ + case '#': + return p.sigDigitsFinal(r) + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigits +} + +func (p *parser) sigDigitsFinal(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.MaxSignificantDigits++ + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigitsFinal +} + +func (p *parser) normalizeSigDigitsWithExponent() state { + p.MinIntegerDigits, p.MaxIntegerDigits = 1, 1 + p.MinFractionDigits = p.MinSignificantDigits - 1 + p.MaxFractionDigits = p.MaxSignificantDigits - 1 + p.MinSignificantDigits, p.MaxSignificantDigits = 0, 0 + return p.exponent +} + +func (p *parser) fraction(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + p.Increment = p.Increment*10 + uint32(r-'0') + p.IncrementScale++ + p.MinFractionDigits++ + p.MaxFractionDigits++ + case '#': + p.MaxFractionDigits++ + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + return p.exponent + default: + return nil + } + return p.fraction +} + +func (p *parser) exponent(r rune) state { + switch r { + case '+': + // Set mode and check it wasn't already set. + if p.Flags&AlwaysExpSign != 0 || p.MinExponentDigits > 0 { + break + } + p.Flags |= AlwaysExpSign + p.doNotTerminate = true + return p.exponent + case '0': + p.MinExponentDigits++ + return p.exponent + } + // termination condition + if p.MinExponentDigits == 0 { + p.setError(errors.New("format: need at least one digit")) + } + return nil +} diff --git a/vendor/golang.org/x/text/internal/number/roundingmode_string.go b/vendor/golang.org/x/text/internal/number/roundingmode_string.go new file mode 100644 index 00000000..bcc22471 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/roundingmode_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type RoundingMode"; DO NOT EDIT. + +package number + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[ToNearestEven-0] + _ = x[ToNearestZero-1] + _ = x[ToNearestAway-2] + _ = x[ToPositiveInf-3] + _ = x[ToNegativeInf-4] + _ = x[ToZero-5] + _ = x[AwayFromZero-6] + _ = x[numModes-7] +} + +const _RoundingMode_name = "ToNearestEvenToNearestZeroToNearestAwayToPositiveInfToNegativeInfToZeroAwayFromZeronumModes" + +var _RoundingMode_index = [...]uint8{0, 13, 26, 39, 52, 65, 71, 83, 91} + +func (i RoundingMode) String() string { + if i >= RoundingMode(len(_RoundingMode_index)-1) { + return "RoundingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]] +} diff --git a/vendor/golang.org/x/text/internal/number/tables.go b/vendor/golang.org/x/text/internal/number/tables.go new file mode 100644 index 00000000..8efce81b --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/tables.go @@ -0,0 +1,1219 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package number + +import "golang.org/x/text/internal/stringset" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +var numSysData = []systemData{ // 59 elements + 0: {id: 0x0, digitSize: 0x1, zero: [4]uint8{0x30, 0x0, 0x0, 0x0}}, + 1: {id: 0x1, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9e, 0xa5, 0x90}}, + 2: {id: 0x2, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9c, 0xb0}}, + 3: {id: 0x3, digitSize: 0x2, zero: [4]uint8{0xd9, 0xa0, 0x0, 0x0}}, + 4: {id: 0x4, digitSize: 0x2, zero: [4]uint8{0xdb, 0xb0, 0x0, 0x0}}, + 5: {id: 0x5, digitSize: 0x3, zero: [4]uint8{0xe1, 0xad, 0x90, 0x0}}, + 6: {id: 0x6, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa7, 0xa6, 0x0}}, + 7: {id: 0x7, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb1, 0x90}}, + 8: {id: 0x8, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x81, 0xa6}}, + 9: {id: 0x9, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x84, 0xb6}}, + 10: {id: 0xa, digitSize: 0x3, zero: [4]uint8{0xea, 0xa9, 0x90, 0x0}}, + 11: {id: 0xb, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa5, 0xa6, 0x0}}, + 12: {id: 0xc, digitSize: 0x3, zero: [4]uint8{0xef, 0xbc, 0x90, 0x0}}, + 13: {id: 0xd, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb5, 0x90}}, + 14: {id: 0xe, digitSize: 0x3, zero: [4]uint8{0xe0, 0xab, 0xa6, 0x0}}, + 15: {id: 0xf, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa9, 0xa6, 0x0}}, + 16: {id: 0x10, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xad, 0x90}}, + 17: {id: 0x11, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0x90, 0x0}}, + 18: {id: 0x12, digitSize: 0x3, zero: [4]uint8{0xea, 0xa4, 0x80, 0x0}}, + 19: {id: 0x13, digitSize: 0x3, zero: [4]uint8{0xe1, 0x9f, 0xa0, 0x0}}, + 20: {id: 0x14, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb3, 0xa6, 0x0}}, + 21: {id: 0x15, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x80, 0x0}}, + 22: {id: 0x16, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x90, 0x0}}, + 23: {id: 0x17, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbb, 0x90, 0x0}}, + 24: {id: 0x18, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x80, 0x0}}, + 25: {id: 0x19, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa5, 0x86, 0x0}}, + 26: {id: 0x1a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x8e}}, + 27: {id: 0x1b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x98}}, + 28: {id: 0x1c, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xb6}}, + 29: {id: 0x1d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xac}}, + 30: {id: 0x1e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xa2}}, + 31: {id: 0x1f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb5, 0xa6, 0x0}}, + 32: {id: 0x20, 
digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x99, 0x90}}, + 33: {id: 0x21, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa0, 0x90, 0x0}}, + 34: {id: 0x22, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xa9, 0xa0}}, + 35: {id: 0x23, digitSize: 0x3, zero: [4]uint8{0xea, 0xaf, 0xb0, 0x0}}, + 36: {id: 0x24, digitSize: 0x3, zero: [4]uint8{0xe1, 0x81, 0x80, 0x0}}, + 37: {id: 0x25, digitSize: 0x3, zero: [4]uint8{0xe1, 0x82, 0x90, 0x0}}, + 38: {id: 0x26, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0xb0, 0x0}}, + 39: {id: 0x27, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x91, 0x90}}, + 40: {id: 0x28, digitSize: 0x2, zero: [4]uint8{0xdf, 0x80, 0x0, 0x0}}, + 41: {id: 0x29, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x90, 0x0}}, + 42: {id: 0x2a, digitSize: 0x3, zero: [4]uint8{0xe0, 0xad, 0xa6, 0x0}}, + 43: {id: 0x2b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x90, 0x92, 0xa0}}, + 44: {id: 0x2c, digitSize: 0x3, zero: [4]uint8{0xea, 0xa3, 0x90, 0x0}}, + 45: {id: 0x2d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x87, 0x90}}, + 46: {id: 0x2e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x8b, 0xb0}}, + 47: {id: 0x2f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb7, 0xa6, 0x0}}, + 48: {id: 0x30, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x83, 0xb0}}, + 49: {id: 0x31, digitSize: 0x3, zero: [4]uint8{0xe1, 0xae, 0xb0, 0x0}}, + 50: {id: 0x32, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9b, 0x80}}, + 51: {id: 0x33, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa7, 0x90, 0x0}}, + 52: {id: 0x34, digitSize: 0x3, zero: [4]uint8{0xe0, 0xaf, 0xa6, 0x0}}, + 53: {id: 0x35, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb1, 0xa6, 0x0}}, + 54: {id: 0x36, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb9, 0x90, 0x0}}, + 55: {id: 0x37, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbc, 0xa0, 0x0}}, + 56: {id: 0x38, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x93, 0x90}}, + 57: {id: 0x39, digitSize: 0x3, zero: [4]uint8{0xea, 0x98, 0xa0, 0x0}}, + 58: {id: 0x3a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xa3, 0xa0}}, +} // Size: 378 bytes + +const ( + numAdlm = 0x1 + numAhom = 0x2 + numArab = 0x3 + numArabext = 0x4 + numArmn = 0x3b + numArmnlow = 0x3c + numBali = 0x5 + numBeng = 0x6 + numBhks = 0x7 + numBrah = 0x8 + numCakm = 0x9 + numCham = 0xa + numCyrl = 0x3d + numDeva = 0xb + numEthi = 0x3e + numFullwide = 0xc + numGeor = 0x3f + numGonm = 0xd + numGrek = 0x40 + numGreklow = 0x41 + numGujr = 0xe + numGuru = 0xf + numHanidays = 0x42 + numHanidec = 0x43 + numHans = 0x44 + numHansfin = 0x45 + numHant = 0x46 + numHantfin = 0x47 + numHebr = 0x48 + numHmng = 0x10 + numJava = 0x11 + numJpan = 0x49 + numJpanfin = 0x4a + numKali = 0x12 + numKhmr = 0x13 + numKnda = 0x14 + numLana = 0x15 + numLanatham = 0x16 + numLaoo = 0x17 + numLatn = 0x0 + numLepc = 0x18 + numLimb = 0x19 + numMathbold = 0x1a + numMathdbl = 0x1b + numMathmono = 0x1c + numMathsanb = 0x1d + numMathsans = 0x1e + numMlym = 0x1f + numModi = 0x20 + numMong = 0x21 + numMroo = 0x22 + numMtei = 0x23 + numMymr = 0x24 + numMymrshan = 0x25 + numMymrtlng = 0x26 + numNewa = 0x27 + numNkoo = 0x28 + numOlck = 0x29 + numOrya = 0x2a + numOsma = 0x2b + numRoman = 0x4b + numRomanlow = 0x4c + numSaur = 0x2c + numShrd = 0x2d + numSind = 0x2e + numSinh = 0x2f + numSora = 0x30 + numSund = 0x31 + numTakr = 0x32 + numTalu = 0x33 + numTaml = 0x4d + numTamldec = 0x34 + numTelu = 0x35 + numThai = 0x36 + numTibt = 0x37 + numTirh = 0x38 + numVaii = 0x39 + numWara = 0x3a + numNumberSystems +) + +var systemMap = map[string]system{ + "adlm": numAdlm, + "ahom": numAhom, + "arab": numArab, + "arabext": numArabext, + "armn": numArmn, + "armnlow": 
numArmnlow, + "bali": numBali, + "beng": numBeng, + "bhks": numBhks, + "brah": numBrah, + "cakm": numCakm, + "cham": numCham, + "cyrl": numCyrl, + "deva": numDeva, + "ethi": numEthi, + "fullwide": numFullwide, + "geor": numGeor, + "gonm": numGonm, + "grek": numGrek, + "greklow": numGreklow, + "gujr": numGujr, + "guru": numGuru, + "hanidays": numHanidays, + "hanidec": numHanidec, + "hans": numHans, + "hansfin": numHansfin, + "hant": numHant, + "hantfin": numHantfin, + "hebr": numHebr, + "hmng": numHmng, + "java": numJava, + "jpan": numJpan, + "jpanfin": numJpanfin, + "kali": numKali, + "khmr": numKhmr, + "knda": numKnda, + "lana": numLana, + "lanatham": numLanatham, + "laoo": numLaoo, + "latn": numLatn, + "lepc": numLepc, + "limb": numLimb, + "mathbold": numMathbold, + "mathdbl": numMathdbl, + "mathmono": numMathmono, + "mathsanb": numMathsanb, + "mathsans": numMathsans, + "mlym": numMlym, + "modi": numModi, + "mong": numMong, + "mroo": numMroo, + "mtei": numMtei, + "mymr": numMymr, + "mymrshan": numMymrshan, + "mymrtlng": numMymrtlng, + "newa": numNewa, + "nkoo": numNkoo, + "olck": numOlck, + "orya": numOrya, + "osma": numOsma, + "roman": numRoman, + "romanlow": numRomanlow, + "saur": numSaur, + "shrd": numShrd, + "sind": numSind, + "sinh": numSinh, + "sora": numSora, + "sund": numSund, + "takr": numTakr, + "talu": numTalu, + "taml": numTaml, + "tamldec": numTamldec, + "telu": numTelu, + "thai": numThai, + "tibt": numTibt, + "tirh": numTirh, + "vaii": numVaii, + "wara": numWara, +} + +var symIndex = [][12]uint8{ // 81 elements + 0: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 1: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 2: [12]uint8{0x0, 0x1, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 3: [12]uint8{0x1, 0x0, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 4: [12]uint8{0x0, 0x1, 0x2, 0x11, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 5: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x12, 0xb}, + 6: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 7: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x13, 0xb}, + 8: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 9: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 10: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 11: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 12: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 13: [12]uint8{0x0, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 14: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x16, 0xb}, + 15: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 16: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0x0}, + 17: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 18: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 19: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 20: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x19, 0x1a, 0xa, 0xb}, + 21: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 22: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 23: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 24: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0x1d, 0xb}, + 25: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 
0x9, 0x1e, 0x0}, + 26: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 27: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 28: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x1f, 0xb}, + 29: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 30: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x20, 0xb}, + 31: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x21, 0x7, 0x8, 0x9, 0x22, 0xb}, + 32: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x23, 0xb}, + 33: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x14, 0x8, 0x9, 0x24, 0xb}, + 34: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0x24, 0xb}, + 35: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x25, 0xb}, + 36: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x26, 0xb}, + 37: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x27, 0xb}, + 38: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb}, + 39: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x29, 0xb}, + 40: [12]uint8{0x1, 0x0, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 41: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2a, 0xb}, + 42: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2b, 0xb}, + 43: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x2c, 0x14, 0x8, 0x9, 0x24, 0xb}, + 44: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 45: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 46: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 47: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2d, 0x0}, + 48: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2e, 0xb}, + 49: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2f, 0xb}, + 50: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x30, 0x7, 0x8, 0x9, 0xa, 0xb}, + 51: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x31, 0xb}, + 52: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x32, 0xb}, + 53: [12]uint8{0x1, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 54: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x33, 0xb}, + 55: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x34, 0xb}, + 56: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 57: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0x3d, 0xb}, + 58: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 59: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 60: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x40, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 61: [12]uint8{0x35, 0x36, 0x37, 0x41, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 62: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 63: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 64: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x43, 0x7, 0x44, 0x9, 0x24, 0xb}, + 65: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x5, 0x3b, 0x7, 0x3c, 0x9, 0x33, 0xb}, + 66: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x46, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 67: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0x1d, 0xb}, + 68: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 69: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 
0x35}, + 70: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 71: [12]uint8{0x35, 0x1, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 72: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0x24, 0xb}, + 73: [12]uint8{0x35, 0x36, 0x2, 0x3, 0x45, 0x46, 0x43, 0x7, 0x8, 0x9, 0xa, 0x35}, + 74: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x31, 0x35}, + 75: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x32, 0x35}, + 76: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x48, 0x46, 0x43, 0x7, 0x3c, 0x9, 0x33, 0x35}, + 77: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x49}, + 78: [12]uint8{0x0, 0x1, 0x4a, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb}, + 79: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x4b, 0xb}, + 80: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x4c, 0x4d, 0xb}, +} // Size: 996 bytes + +var symData = stringset.Set{ + Data: "" + // Size: 599 bytes + ".,;%+-E׉∞NaN:\u00a0\u200e%\u200e\u200e+\u200e-ليس\u00a0رقمًا٪NDТерхьаш" + + "\u00a0дац·’mnne×10^0/00INF−\u200e−ناعددepälukuՈչԹარ\u00a0არის\u00a0რიცხვ" + + "იZMdMсан\u00a0емес¤¤¤сан\u00a0эмесບໍ່\u200bແມ່ນ\u200bໂຕ\u200bເລກNSဂဏန်" + + "းမဟုတ်သောННне\u00a0числочыыһыла\u00a0буотах·10^epilohosan\u00a0dälTFЕs" + + "on\u00a0emasҳақиқий\u00a0сон\u00a0эмас非數值非数值٫٬؛٪\u061c\u061c+\u061c-اس؉ل" + + "يس\u00a0رقم\u200f+\u200f-\u200f−٪\u200f\u061c−×۱۰^؉\u200f\u200e+\u200e" + + "\u200e-\u200e\u200e−\u200e+\u200e:၊ཨང་མེན་གྲངས་མེདཨང་མད", + Index: []uint16{ // 79 elements + // Entry 0 - 3F + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, + 0x0009, 0x000c, 0x000f, 0x0012, 0x0013, 0x0015, 0x001c, 0x0020, + 0x0024, 0x0036, 0x0038, 0x003a, 0x0050, 0x0052, 0x0055, 0x0058, + 0x0059, 0x005e, 0x0062, 0x0065, 0x0068, 0x006e, 0x0078, 0x0080, + 0x0086, 0x00ae, 0x00af, 0x00b2, 0x00c2, 0x00c8, 0x00d8, 0x0105, + 0x0107, 0x012e, 0x0132, 0x0142, 0x015e, 0x0163, 0x016a, 0x0173, + 0x0175, 0x0177, 0x0180, 0x01a0, 0x01a9, 0x01b2, 0x01b4, 0x01b6, + 0x01b8, 0x01bc, 0x01bf, 0x01c2, 0x01c6, 0x01c8, 0x01d6, 0x01da, + // Entry 40 - 7F + 0x01de, 0x01e4, 0x01e9, 0x01ee, 0x01f5, 0x01fa, 0x0201, 0x0208, + 0x0211, 0x0215, 0x0218, 0x021b, 0x0230, 0x0248, 0x0257, + }, +} // Size: 797 bytes + +// langToDefaults maps a compact language index to the default numbering system +// and default symbol set +var langToDefaults = [775]symOffset{ + // Entry 0 - 3F + 0x8000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, 0x0000, + 0x0000, 0x0000, 0x8003, 0x0002, 0x0002, 0x0002, 0x0002, 0x0003, + 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, + 0x0003, 0x0003, 0x0003, 0x0003, 0x0002, 0x0002, 0x0002, 0x0004, + 0x0002, 0x0004, 0x0002, 0x0002, 0x0002, 0x0003, 0x0002, 0x0000, + 0x8005, 0x0000, 0x0000, 0x0000, 0x8006, 0x0005, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + // Entry 40 - 7F + 0x8009, 0x0000, 0x0000, 0x800a, 0x0000, 0x0000, 0x800c, 0x0001, + 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x800e, 0x0000, 0x0000, 0x0007, + 0x0007, 0x0000, 0x0000, 0x0000, 0x0000, 0x800f, 0x0008, 0x0008, + 0x8011, 0x0001, 0x0001, 0x0001, 0x803c, 0x0000, 0x0009, 0x0009, + 0x0009, 0x0000, 0x0000, 0x000a, 0x000b, 0x000a, 0x000c, 0x000a, + 0x000a, 0x000c, 0x000a, 0x000d, 0x000d, 0x000a, 0x000a, 0x0001, + 0x0001, 0x0000, 0x0001, 0x0001, 0x803f, 0x0000, 0x0000, 0x0000, + // Entry 80 - BF + 0x000e, 0x000e, 
0x000e, 0x000f, 0x000f, 0x000f, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x000a, 0x0010, 0x0000, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x000a, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x0000, 0x0009, 0x0000, + 0x0000, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + // Entry C0 - FF + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0013, 0x0000, + 0x0000, 0x000f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000, 0x0015, + 0x0015, 0x0006, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, + 0x0006, 0x0001, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, + // Entry 100 - 13F + 0x0000, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, + 0x0000, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0016, 0x0016, + 0x0017, 0x0017, 0x0001, 0x0001, 0x8041, 0x0018, 0x0018, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0019, 0x0019, 0x0000, 0x0000, + 0x0017, 0x0017, 0x0017, 0x8044, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, + // Entry 140 - 17F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0006, 0x0006, 0x0000, 0x0000, + 0x8047, 0x0000, 0x0006, 0x0006, 0x001a, 0x001a, 0x001a, 0x001a, + 0x804a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x804c, 0x001b, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x000a, 0x000a, 0x0001, 0x0001, + 0x001c, 0x001c, 0x0009, 0x0009, 0x804f, 0x0000, 0x0000, 0x0000, + // Entry 180 - 1BF + 0x0000, 0x0000, 0x8052, 0x0006, 0x0006, 0x001d, 0x0006, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001e, 0x001e, 0x001f, + 0x001f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, + 0x0001, 0x000d, 0x000d, 0x0000, 0x0000, 0x0020, 0x0020, 0x0006, + 0x0006, 0x0021, 0x0021, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x8054, 0x0000, 0x0000, 0x0000, 0x0000, 0x8056, 0x001b, + 0x0000, 0x0000, 0x0001, 0x0001, 0x0022, 0x0022, 0x0000, 0x0000, + // Entry 1C0 - 1FF + 0x0000, 0x0023, 0x0023, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0024, 0x0024, 0x8058, 0x0000, 0x0000, 0x0016, 0x0016, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0025, 0x0025, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000d, 0x000d, 0x0000, 0x0000, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x805a, 0x0000, 0x0000, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x805b, 0x0026, 0x805d, + // Entry 200 - 23F + 0x0000, 0x0000, 0x0000, 0x0000, 0x805e, 0x0015, 0x0015, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x8061, 0x0000, 0x0000, 0x8062, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0001, + 0x0001, 0x0015, 0x0015, 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, + 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0027, 0x0027, 0x0027, 0x8065, 0x8067, + 0x001b, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, + 0x8069, 0x0028, 0x0006, 0x0001, 0x0006, 0x0001, 0x0001, 0x0001, + // Entry 240 - 27F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0006, 0x0000, 0x0000, 0x001a, 0x001a, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0029, 0x0029, 0x0029, 0x0029, + 0x0029, 0x0029, 0x0029, 0x0006, 0x0006, 0x0000, 0x0000, 0x002a, + 0x002a, 0x0000, 0x0000, 0x0000, 0x0000, 0x806b, 0x0000, 0x0000, + 0x002b, 0x002b, 0x002b, 0x002b, 0x0006, 0x0006, 0x000d, 0x000d, + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x002c, 0x002c, 0x002d, 0x002d, 0x002e, 0x002e, 0x0000, 0x0000, + // Entry 280 - 2BF + 0x0000, 0x002f, 0x002f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, 0x806d, 0x0022, 0x0022, + 0x0022, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0030, 0x0030, 0x0000, 0x0000, 0x8071, 0x0031, 0x0006, + // Entry 2C0 - 2FF + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x000d, 0x000d, 0x0001, + 0x0001, 0x0000, 0x0000, 0x0032, 0x0032, 0x8074, 0x8076, 0x001b, + 0x8077, 0x8079, 0x0028, 0x807b, 0x0034, 0x0033, 0x0033, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0035, 0x0035, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0036, 0x0037, 0x0037, 0x0036, 0x0036, 0x0001, + 0x0001, 0x807d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8080, + // Entry 300 - 33F + 0x0036, 0x0036, 0x0036, 0x0000, 0x0000, 0x0006, 0x0014, +} // Size: 1550 bytes + +// langToAlt is a list of numbering system and symbol set pairs, sorted and +// marked by compact language index. 
+var langToAlt = []altSymData{ // 131 elements + 1: {compactTag: 0x0, symIndex: 0x38, system: 0x3}, + 2: {compactTag: 0x0, symIndex: 0x42, system: 0x4}, + 3: {compactTag: 0xa, symIndex: 0x39, system: 0x3}, + 4: {compactTag: 0xa, symIndex: 0x2, system: 0x0}, + 5: {compactTag: 0x28, symIndex: 0x0, system: 0x6}, + 6: {compactTag: 0x2c, symIndex: 0x5, system: 0x0}, + 7: {compactTag: 0x2c, symIndex: 0x3a, system: 0x3}, + 8: {compactTag: 0x2c, symIndex: 0x42, system: 0x4}, + 9: {compactTag: 0x40, symIndex: 0x0, system: 0x6}, + 10: {compactTag: 0x43, symIndex: 0x0, system: 0x0}, + 11: {compactTag: 0x43, symIndex: 0x4f, system: 0x37}, + 12: {compactTag: 0x46, symIndex: 0x1, system: 0x0}, + 13: {compactTag: 0x46, symIndex: 0x38, system: 0x3}, + 14: {compactTag: 0x54, symIndex: 0x0, system: 0x9}, + 15: {compactTag: 0x5d, symIndex: 0x3a, system: 0x3}, + 16: {compactTag: 0x5d, symIndex: 0x8, system: 0x0}, + 17: {compactTag: 0x60, symIndex: 0x1, system: 0x0}, + 18: {compactTag: 0x60, symIndex: 0x38, system: 0x3}, + 19: {compactTag: 0x60, symIndex: 0x42, system: 0x4}, + 20: {compactTag: 0x60, symIndex: 0x0, system: 0x5}, + 21: {compactTag: 0x60, symIndex: 0x0, system: 0x6}, + 22: {compactTag: 0x60, symIndex: 0x0, system: 0x8}, + 23: {compactTag: 0x60, symIndex: 0x0, system: 0x9}, + 24: {compactTag: 0x60, symIndex: 0x0, system: 0xa}, + 25: {compactTag: 0x60, symIndex: 0x0, system: 0xb}, + 26: {compactTag: 0x60, symIndex: 0x0, system: 0xc}, + 27: {compactTag: 0x60, symIndex: 0x0, system: 0xd}, + 28: {compactTag: 0x60, symIndex: 0x0, system: 0xe}, + 29: {compactTag: 0x60, symIndex: 0x0, system: 0xf}, + 30: {compactTag: 0x60, symIndex: 0x0, system: 0x11}, + 31: {compactTag: 0x60, symIndex: 0x0, system: 0x12}, + 32: {compactTag: 0x60, symIndex: 0x0, system: 0x13}, + 33: {compactTag: 0x60, symIndex: 0x0, system: 0x14}, + 34: {compactTag: 0x60, symIndex: 0x0, system: 0x15}, + 35: {compactTag: 0x60, symIndex: 0x0, system: 0x16}, + 36: {compactTag: 0x60, symIndex: 0x0, system: 0x17}, + 37: {compactTag: 0x60, symIndex: 0x0, system: 0x18}, + 38: {compactTag: 0x60, symIndex: 0x0, system: 0x19}, + 39: {compactTag: 0x60, symIndex: 0x0, system: 0x1f}, + 40: {compactTag: 0x60, symIndex: 0x0, system: 0x21}, + 41: {compactTag: 0x60, symIndex: 0x0, system: 0x23}, + 42: {compactTag: 0x60, symIndex: 0x0, system: 0x24}, + 43: {compactTag: 0x60, symIndex: 0x0, system: 0x25}, + 44: {compactTag: 0x60, symIndex: 0x0, system: 0x28}, + 45: {compactTag: 0x60, symIndex: 0x0, system: 0x29}, + 46: {compactTag: 0x60, symIndex: 0x0, system: 0x2a}, + 47: {compactTag: 0x60, symIndex: 0x0, system: 0x2b}, + 48: {compactTag: 0x60, symIndex: 0x0, system: 0x2c}, + 49: {compactTag: 0x60, symIndex: 0x0, system: 0x2d}, + 50: {compactTag: 0x60, symIndex: 0x0, system: 0x30}, + 51: {compactTag: 0x60, symIndex: 0x0, system: 0x31}, + 52: {compactTag: 0x60, symIndex: 0x0, system: 0x32}, + 53: {compactTag: 0x60, symIndex: 0x0, system: 0x33}, + 54: {compactTag: 0x60, symIndex: 0x0, system: 0x34}, + 55: {compactTag: 0x60, symIndex: 0x0, system: 0x35}, + 56: {compactTag: 0x60, symIndex: 0x0, system: 0x36}, + 57: {compactTag: 0x60, symIndex: 0x0, system: 0x37}, + 58: {compactTag: 0x60, symIndex: 0x0, system: 0x39}, + 59: {compactTag: 0x60, symIndex: 0x0, system: 0x43}, + 60: {compactTag: 0x64, symIndex: 0x0, system: 0x0}, + 61: {compactTag: 0x64, symIndex: 0x38, system: 0x3}, + 62: {compactTag: 0x64, symIndex: 0x42, system: 0x4}, + 63: {compactTag: 0x7c, symIndex: 0x50, system: 0x37}, + 64: {compactTag: 0x7c, symIndex: 0x0, system: 0x0}, + 65: {compactTag: 
0x114, symIndex: 0x43, system: 0x4}, + 66: {compactTag: 0x114, symIndex: 0x18, system: 0x0}, + 67: {compactTag: 0x114, symIndex: 0x3b, system: 0x3}, + 68: {compactTag: 0x123, symIndex: 0x1, system: 0x0}, + 69: {compactTag: 0x123, symIndex: 0x3c, system: 0x3}, + 70: {compactTag: 0x123, symIndex: 0x44, system: 0x4}, + 71: {compactTag: 0x158, symIndex: 0x0, system: 0x0}, + 72: {compactTag: 0x158, symIndex: 0x3b, system: 0x3}, + 73: {compactTag: 0x158, symIndex: 0x45, system: 0x4}, + 74: {compactTag: 0x160, symIndex: 0x0, system: 0x0}, + 75: {compactTag: 0x160, symIndex: 0x38, system: 0x3}, + 76: {compactTag: 0x16d, symIndex: 0x1b, system: 0x0}, + 77: {compactTag: 0x16d, symIndex: 0x0, system: 0x9}, + 78: {compactTag: 0x16d, symIndex: 0x0, system: 0xa}, + 79: {compactTag: 0x17c, symIndex: 0x0, system: 0x0}, + 80: {compactTag: 0x17c, symIndex: 0x3d, system: 0x3}, + 81: {compactTag: 0x17c, symIndex: 0x42, system: 0x4}, + 82: {compactTag: 0x182, symIndex: 0x6, system: 0x0}, + 83: {compactTag: 0x182, symIndex: 0x38, system: 0x3}, + 84: {compactTag: 0x1b1, symIndex: 0x0, system: 0x0}, + 85: {compactTag: 0x1b1, symIndex: 0x3e, system: 0x3}, + 86: {compactTag: 0x1b6, symIndex: 0x42, system: 0x4}, + 87: {compactTag: 0x1b6, symIndex: 0x1b, system: 0x0}, + 88: {compactTag: 0x1d2, symIndex: 0x42, system: 0x4}, + 89: {compactTag: 0x1d2, symIndex: 0x0, system: 0x0}, + 90: {compactTag: 0x1f3, symIndex: 0x0, system: 0xb}, + 91: {compactTag: 0x1fd, symIndex: 0x4e, system: 0x24}, + 92: {compactTag: 0x1fd, symIndex: 0x26, system: 0x0}, + 93: {compactTag: 0x1ff, symIndex: 0x42, system: 0x4}, + 94: {compactTag: 0x204, symIndex: 0x15, system: 0x0}, + 95: {compactTag: 0x204, symIndex: 0x3f, system: 0x3}, + 96: {compactTag: 0x204, symIndex: 0x46, system: 0x4}, + 97: {compactTag: 0x20c, symIndex: 0x0, system: 0xb}, + 98: {compactTag: 0x20f, symIndex: 0x6, system: 0x0}, + 99: {compactTag: 0x20f, symIndex: 0x38, system: 0x3}, + 100: {compactTag: 0x20f, symIndex: 0x42, system: 0x4}, + 101: {compactTag: 0x22e, symIndex: 0x0, system: 0x0}, + 102: {compactTag: 0x22e, symIndex: 0x47, system: 0x4}, + 103: {compactTag: 0x22f, symIndex: 0x42, system: 0x4}, + 104: {compactTag: 0x22f, symIndex: 0x1b, system: 0x0}, + 105: {compactTag: 0x238, symIndex: 0x42, system: 0x4}, + 106: {compactTag: 0x238, symIndex: 0x28, system: 0x0}, + 107: {compactTag: 0x265, symIndex: 0x38, system: 0x3}, + 108: {compactTag: 0x265, symIndex: 0x0, system: 0x0}, + 109: {compactTag: 0x29d, symIndex: 0x22, system: 0x0}, + 110: {compactTag: 0x29d, symIndex: 0x40, system: 0x3}, + 111: {compactTag: 0x29d, symIndex: 0x48, system: 0x4}, + 112: {compactTag: 0x29d, symIndex: 0x4d, system: 0xc}, + 113: {compactTag: 0x2bd, symIndex: 0x31, system: 0x0}, + 114: {compactTag: 0x2bd, symIndex: 0x3e, system: 0x3}, + 115: {compactTag: 0x2bd, symIndex: 0x42, system: 0x4}, + 116: {compactTag: 0x2cd, symIndex: 0x1b, system: 0x0}, + 117: {compactTag: 0x2cd, symIndex: 0x49, system: 0x4}, + 118: {compactTag: 0x2ce, symIndex: 0x49, system: 0x4}, + 119: {compactTag: 0x2d0, symIndex: 0x33, system: 0x0}, + 120: {compactTag: 0x2d0, symIndex: 0x4a, system: 0x4}, + 121: {compactTag: 0x2d1, symIndex: 0x42, system: 0x4}, + 122: {compactTag: 0x2d1, symIndex: 0x28, system: 0x0}, + 123: {compactTag: 0x2d3, symIndex: 0x34, system: 0x0}, + 124: {compactTag: 0x2d3, symIndex: 0x4b, system: 0x4}, + 125: {compactTag: 0x2f9, symIndex: 0x0, system: 0x0}, + 126: {compactTag: 0x2f9, symIndex: 0x38, system: 0x3}, + 127: {compactTag: 0x2f9, symIndex: 0x42, system: 0x4}, + 128: {compactTag: 0x2ff, 
symIndex: 0x36, system: 0x0}, + 129: {compactTag: 0x2ff, symIndex: 0x41, system: 0x3}, + 130: {compactTag: 0x2ff, symIndex: 0x4c, system: 0x4}, +} // Size: 810 bytes + +var tagToDecimal = []uint8{ // 775 elements + // Entry 0 - 3F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 40 - 7F + 0x05, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, + // Entry 80 - BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry C0 - FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 100 - 13F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 140 - 17F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 180 - 1BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 1C0 - 1FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, + // Entry 200 - 23F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x05, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 240 - 27F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 280 - 2BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 2C0 - 2FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 300 - 33F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08, +} // Size: 799 bytes + +var tagToScientific = []uint8{ // 775 elements + // Entry 0 - 3F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 40 - 7F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 80 - BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry C0 - FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 100 - 13F + 0x02, 0x02, 0x02, 
0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, + 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 180 - 1BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 1C0 - 1FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 200 - 23F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, 0x02, + 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 240 - 27F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 280 - 2BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 2C0 - 2FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 300 - 33F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x09, +} // Size: 799 bytes + +var tagToPercent = []uint8{ // 775 elements + // Entry 0 - 3F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x06, 0x06, 0x03, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, 0x03, + 0x03, 0x04, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x07, 0x07, 0x04, 0x04, + // Entry 80 - BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x03, 0x04, + 0x04, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 100 - 13F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + 0x0b, 0x0b, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 140 - 17F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 180 - 1BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + // Entry 1C0 - 1FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 200 - 23F + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 
0x06, 0x04, 0x04, 0x04, 0x06, 0x04, + 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + // Entry 280 - 2BF + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x0e, + // Entry 2C0 - 2FF + 0x0e, 0x0e, 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 300 - 33F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0a, +} // Size: 799 bytes + +var formats = []Pattern{Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x0, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x3, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: 
"\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xc, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xa, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x8, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x6, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x3}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xd, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x4}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x2, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x03%\u00a0\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "\x01[\x01]", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 
0, + FormatWidth: 0x5, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x1, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x01%\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}} + +// Total table size 8634 bytes (8KiB); checksum: 8F23386D diff --git a/vendor/golang.org/x/text/internal/stringset/set.go b/vendor/golang.org/x/text/internal/stringset/set.go new file mode 100644 index 00000000..bb2fffbc --- /dev/null +++ b/vendor/golang.org/x/text/internal/stringset/set.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stringset provides a way to represent a collection of strings +// compactly. +package stringset + +import "sort" + +// A Set holds a collection of strings that can be looked up by an index number. +type Set struct { + // These fields are exported to allow for code generation. + + Data string + Index []uint16 +} + +// Elem returns the string with index i. It panics if i is out of range. +func (s *Set) Elem(i int) string { + return s.Data[s.Index[i]:s.Index[i+1]] +} + +// Len returns the number of strings in the set. +func (s *Set) Len() int { + return len(s.Index) - 1 +} + +// Search returns the index of the given string or -1 if it is not in the set. +// The Set must have been created with strings in sorted order. +func Search(s *Set, str string) int { + // TODO: optimize this if it gets used a lot. + n := len(s.Index) - 1 + p := sort.Search(n, func(i int) bool { + return s.Elem(i) >= str + }) + if p == n || str != s.Elem(p) { + return -1 + } + return p +} + +// A Builder constructs Sets. +type Builder struct { + set Set + index map[string]int +} + +// NewBuilder returns a new and initialized Builder. +func NewBuilder() *Builder { + return &Builder{ + set: Set{ + Index: []uint16{0}, + }, + index: map[string]int{}, + } +} + +// Set returns the set created so far. +func (b *Builder) Set() Set { + return b.set +} + +// Index returns the index for the given string, which must have been added +// before. +func (b *Builder) Index(s string) int { + return b.index[s] +} + +// Add adds a string to the index. Strings that are added by a single Add will +// be stored together, unless they match an existing string. +func (b *Builder) Add(ss ...string) { + // First check if the string already exists.
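+ // Each new string is appended to Data and its end offset is recorded in
+ // Index, so entry i spans Data[Index[i]:Index[i+1]] and duplicates reuse
+ // the index of their first occurrence. Offsets are stored as uint16,
+ // which is what limits the accumulated Data to 0xFFFF bytes below.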
+ for _, s := range ss { + if _, ok := b.index[s]; ok { + continue + } + b.index[s] = len(b.set.Index) - 1 + b.set.Data += s + x := len(b.set.Data) + if x > 0xFFFF { + panic("stringset: index offset exceeds 0xFFFF") + } + b.set.Index = append(b.set.Index, uint16(x)) + } +} diff --git a/vendor/golang.org/x/text/message/catalog.go b/vendor/golang.org/x/text/message/catalog.go new file mode 100644 index 00000000..068271de --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message + +// TODO: some types in this file will need to be made public at some time. +// Documentation and method names will reflect this by using the exported name. + +import ( + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// MatchLanguage reports the matched tag obtained from language.MatchStrings for +// the Matcher of the DefaultCatalog. +func MatchLanguage(preferred ...string) language.Tag { + c := DefaultCatalog + tag, _ := language.MatchStrings(c.Matcher(), preferred...) + return tag +} + +// DefaultCatalog is used by SetString. +var DefaultCatalog catalog.Catalog = defaultCatalog + +var defaultCatalog = catalog.NewBuilder() + +// SetString calls SetString on the initial default Catalog. +func SetString(tag language.Tag, key string, msg string) error { + return defaultCatalog.SetString(tag, key, msg) +} + +// Set calls Set on the initial default Catalog. +func Set(tag language.Tag, key string, msg ...catalog.Message) error { + return defaultCatalog.Set(tag, key, msg...) +} diff --git a/vendor/golang.org/x/text/message/catalog/catalog.go b/vendor/golang.org/x/text/message/catalog/catalog.go new file mode 100644 index 00000000..96955d07 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/catalog.go @@ -0,0 +1,365 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catalog defines collections of translated format strings. +// +// This package mostly defines types for populating catalogs with messages. The +// catmsg package contains further definitions for creating custom message and +// dictionary types as well as packages that use Catalogs. +// +// Package catalog defines various interfaces: Dictionary, Loader, and Message. +// A Dictionary maintains a set of translations of format strings for a single +// language. The Loader interface defines a source of dictionaries. A +// translation of a format string is represented by a Message. +// +// # Catalogs +// +// A Catalog defines a programmatic interface for setting message translations. +// It maintains a set of per-language dictionaries with translations for a set +// of keys. For message translation to function properly, a translation should +// be defined for each key for each supported language. A dictionary may be +// underspecified, though, if there is a parent language that already defines +// the key. For example, a Dictionary for "en-GB" could leave out entries that +// are identical to those in a dictionary for "en". +// +// # Messages +// +// A Message is a format string which varies on the value of substitution +// variables. For instance, to indicate the number of results one could want "no +// results" if there are none, "1 result" if there is 1, and "%d results" for +// any other number.
Catalog is agnostic to the kind of format strings that are +// used: for instance, messages can follow either the printf-style substitution +// from package fmt or use templates. +// +// A Message does not substitute arguments in the format string. This job is +// reserved for packages that render strings, such as message, that use Catalogs +// to select strings. This separation of concerns allows Catalog to be used to +// store any kind of formatting strings. +// +// # Selecting messages based on linguistic features of substitution arguments +// +// Messages may vary based on any linguistic features of the argument values. +// The most common one is plural form, but others exist. +// +// Selection messages are provided in packages that provide support for a +// specific linguistic feature. The following snippet uses plural.Selectf: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// plural.Selectf(1, "", +// plural.One, "You are 1 minute late.", +// plural.Other, "You are %d minutes late.")) +// +// In this example, a message is stored in the Catalog where one of two messages +// is selected based on the first argument, a number. The first message is +// selected if the argument is singular (identified by the selector "one") and +// the second message is selected in all other cases. The selectors are defined +// by the plural rules defined in CLDR. The selector "other" is special and will +// always match. Each language always defines one of the linguistic categories +// to be "other." For English, singular is "one" and plural is "other". +// +// Selects can be nested. This allows selecting sentences based on features of +// multiple arguments or multiple linguistic properties of a single argument. +// +// # String interpolation +// +// There is often a lot of commonality between the possible variants of a +// message. For instance, in the example above the word "minute" varies based on +// the plural category of the argument, but the rest of the sentence is +// identical. Using interpolation the above message can be rewritten as: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", +// plural.Selectf(1, "", plural.One, "minute", plural.Other, "minutes")), +// catalog.String("You are %[1]d ${minutes} late.")) +// +// Var is defined to return the variable name if the message does not yield a +// match. This allows us to further simplify this snippet to +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", plural.Selectf(1, "", plural.One, "minute")), +// catalog.String("You are %d ${minutes} late.")) +// +// Overall this is still only a minor improvement, but things can get a lot more +// unwieldy if more than one linguistic feature is used to determine a message +// variant.
Consider the following example: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// catalog.Var("their", +// plural.Selectf(1, "", +// plural.One, gender.Select(1, "female", "her", "other", "his"))), +// catalog.Var("invites", plural.Selectf(1, "", plural.One, "invite")), +// catalog.String("%[1]v ${invites} %[2]v to ${their} party.")) +// +// Without variable substitution, this would have to be written as +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// plural.Selectf(1, "", +// plural.One, gender.Select(1, +// "female", "%[1]v invites %[2]v to her party.", +// "other", "%[1]v invites %[2]v to his party."), +// plural.Other, "%[1]v invites %[2]v to their party.")) +// +// Not necessarily shorter, but using variables there is less duplication and +// the messages are more maintenance-friendly. Moreover, languages may have up +// to six plural forms. This makes the use of variables more welcome. +// +// Different messages using the same inflections can reuse variables by moving +// them to macros. Using macros we can rewrite the message as: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.SetString(language.English, "%[1]v invite(s) %[2]v to their party.", +// "%[1]v ${invites(1)} %[2]v to ${their(1)} party.") +// +// Where the following macros were defined separately. +// +// catalog.SetMacro(language.English, "invites", plural.Selectf(1, "", +// plural.One, "invite")) +// catalog.SetMacro(language.English, "their", plural.Selectf(1, "", +// plural.One, gender.Select(1, "female", "her", "other", "his"))) +// +// Placeholders invoke a macro by passing its arguments in parentheses. +// +// # Looking up messages +// +// Message lookup using Catalogs is typically only done by specialized packages +// and is not something the user should be concerned with. For instance, to +// express the tardiness of a user using the related message we defined earlier, +// the user may use the package message like so: +// +// p := message.NewPrinter(language.English) +// p.Printf("You are %d minute(s) late.", 5) +// +// Which would print: +// +// You are 5 minutes late. +// +// This package is UNDER CONSTRUCTION and its API may change. +package catalog // import "golang.org/x/text/message/catalog" + +// TODO: +// Some way to freeze a catalog. +// - Locking on each lookup turns out to be about 50% of the total running time +// for some of the benchmarks in the message package. +// Consider these: +// - Sequence type to support sequences in user-defined messages. +// - Garbage collection: Remove dictionaries that can no longer be reached +// as other dictionaries have been added that cover all possible keys. + +import ( + "errors" + "fmt" + + "golang.org/x/text/internal" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// A Catalog allows lookup of translated messages. +type Catalog interface { + // Languages returns all languages for which the Catalog contains variants. + Languages() []language.Tag + + // Matcher returns a Matcher for languages from this Catalog. + Matcher() language.Matcher + + // A Context is used for evaluating Messages. + Context(tag language.Tag, r catmsg.Renderer) *Context + + // This method also makes Catalog a private interface. + lookup(tag language.Tag, key string) (data string, ok bool) +} + +// NewFromMap creates a Catalog from the given map.
If a Dictionary is +// underspecified, the entry is retrieved from a parent language. +func NewFromMap(dictionaries map[string]Dictionary, opts ...Option) (Catalog, error) { + options := options{} + for _, o := range opts { + o(&options) + } + c := &catalog{ + dicts: map[language.Tag]Dictionary{}, + } + _, hasFallback := dictionaries[options.fallback.String()] + if hasFallback { + // TODO: Should it be okay to not have a fallback language? + // Catalog generators could enforce there is always a fallback. + c.langs = append(c.langs, options.fallback) + } + for lang, dict := range dictionaries { + tag, err := language.Parse(lang) + if err != nil { + return nil, fmt.Errorf("catalog: invalid language tag %q", lang) + } + if _, ok := c.dicts[tag]; ok { + return nil, fmt.Errorf("catalog: duplicate entry for tag %q after normalization", tag) + } + c.dicts[tag] = dict + if !hasFallback || tag != options.fallback { + c.langs = append(c.langs, tag) + } + } + if hasFallback { + internal.SortTags(c.langs[1:]) + } else { + internal.SortTags(c.langs) + } + c.matcher = language.NewMatcher(c.langs) + return c, nil +} + +// A Dictionary is a source of translations for a single language. +type Dictionary interface { + // Lookup returns a message compiled with catmsg.Compile for the given key. + // It returns false for ok if such a message could not be found. + Lookup(key string) (data string, ok bool) +} + +type catalog struct { + langs []language.Tag + dicts map[language.Tag]Dictionary + macros store + matcher language.Matcher +} + +func (c *catalog) Languages() []language.Tag { return c.langs } +func (c *catalog) Matcher() language.Matcher { return c.matcher } + +func (c *catalog) lookup(tag language.Tag, key string) (data string, ok bool) { + for ; ; tag = tag.Parent() { + if dict, ok := c.dicts[tag]; ok { + if data, ok := dict.Lookup(key); ok { + return data, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. +func (c *catalog) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: c, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&c.macros, tag}), + } +} + +// A Builder allows building a Catalog programmatically. +type Builder struct { + options + matcher language.Matcher + + index store + macros store +} + +type options struct { + fallback language.Tag +} + +// An Option configures Catalog behavior. +type Option func(*options) + +// Fallback specifies the default fallback language. The default is Und. +func Fallback(tag language.Tag) Option { + return func(o *options) { o.fallback = tag } +} + +// TODO: +// // Catalogs specifies one or more sources for a Catalog. +// // Lookups are in order. +// // This can be changed by inserting a Catalog used for setting, which implements +// // Loader, used for setting in the chain. +// func Catalogs(d ...Loader) Option { +// return nil +// } +// +// func Delims(start, end string) Option {} +// +// func Dict(tag language.Tag, d ...Dictionary) Option + +// NewBuilder returns an empty mutable Catalog. +func NewBuilder(opts ...Option) *Builder { + c := &Builder{} + for _, o := range opts { + o(&c.options) + } + return c +} + +// SetString is shorthand for Set(tag, key, String(msg)). +func (c *Builder) SetString(tag language.Tag, key string, msg string) error { + return c.set(tag, key, &c.index, String(msg)) +} + +// Set sets the translation for the given language and key.
+ +// +// When evaluating this message, the first Message in the sequence of msgs that +// evaluates to a string will be the message returned. +func (c *Builder) Set(tag language.Tag, key string, msg ...Message) error { + return c.set(tag, key, &c.index, msg...) +} + +// SetMacro defines a Message that may be substituted in another message. +// The arguments to a macro Message are passed as arguments in a +// placeholder of the form "${foo(arg1, arg2)}". +func (c *Builder) SetMacro(tag language.Tag, name string, msg ...Message) error { + return c.set(tag, name, &c.macros, msg...) +} + +// ErrNotFound indicates there was no message for the given key. +var ErrNotFound = errors.New("catalog: message not found") + +// String specifies a plain message string. It can be used as fallback if no +// other strings match or as a simple standalone message. +// +// It is an error to pass more than one String in a message sequence. +func String(name string) Message { + return catmsg.String(name) +} + +// Var sets a variable that may be substituted in formatting patterns using +// named substitution of the form "${name}". The name argument is used as a +// fallback if the statements do not produce a match. The statement sequence may +// not contain any Var calls. +// +// The name passed to a Var must be unique within a message sequence. +func Var(name string, msg ...Message) Message { + return &catmsg.Var{Name: name, Message: firstInSequence(msg)} +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. +func (b *Builder) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: b, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&b.macros, tag}), + } +} + +// A Context is used for evaluating Messages. +// Only one Message may be formatted per context at any given time. +type Context struct { + cat Catalog + tag language.Tag // TODO: use compact index. + dec *catmsg.Decoder +} + +// Execute looks up and executes the message with the given key. +// It returns ErrNotFound if no message could be found in the index. +func (c *Context) Execute(key string) error { + data, ok := c.cat.lookup(c.tag, key) + if !ok { + return ErrNotFound + } + return c.dec.Execute(data) +} diff --git a/vendor/golang.org/x/text/message/catalog/dict.go b/vendor/golang.org/x/text/message/catalog/dict.go new file mode 100644 index 00000000..a0eb8181 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/dict.go @@ -0,0 +1,129 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catalog + +import ( + "sync" + + "golang.org/x/text/internal" + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// TODO: +// Dictionary returns a Dictionary that returns the first Message, using the +// given language tag, that matches: +// 1. the last one registered by one of the Set methods +// 2. returned by one of the Loaders +// 3. repeat from 1. using the parent language +// This approach allows messages to be underspecified. +// func (c *Catalog) Dictionary(tag language.Tag) (Dictionary, error) { +// // TODO: verify dictionary exists. +// return &dict{&c.index, tag}, nil +// } + +type dict struct { + s *store + tag language.Tag // TODO: make compact tag.
+} + +func (d *dict) Lookup(key string) (data string, ok bool) { + return d.s.lookup(d.tag, key) +} + +func (b *Builder) lookup(tag language.Tag, key string) (data string, ok bool) { + return b.index.lookup(tag, key) +} + +func (c *Builder) set(tag language.Tag, key string, s *store, msg ...Message) error { + data, err := catmsg.Compile(tag, &dict{&c.macros, tag}, firstInSequence(msg)) + + s.mutex.Lock() + defer s.mutex.Unlock() + + m := s.index[tag] + if m == nil { + m = msgMap{} + if s.index == nil { + s.index = map[language.Tag]msgMap{} + } + c.matcher = nil + s.index[tag] = m + } + + m[key] = data + return err +} + +func (c *Builder) Matcher() language.Matcher { + c.index.mutex.RLock() + m := c.matcher + c.index.mutex.RUnlock() + if m != nil { + return m + } + + c.index.mutex.Lock() + if c.matcher == nil { + c.matcher = language.NewMatcher(c.unlockedLanguages()) + } + m = c.matcher + c.index.mutex.Unlock() + return m +} + +type store struct { + mutex sync.RWMutex + index map[language.Tag]msgMap +} + +type msgMap map[string]string + +func (s *store) lookup(tag language.Tag, key string) (data string, ok bool) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for ; ; tag = tag.Parent() { + if msgs, ok := s.index[tag]; ok { + if msg, ok := msgs[key]; ok { + return msg, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Languages returns all languages for which the Catalog contains variants. +func (b *Builder) Languages() []language.Tag { + s := &b.index + s.mutex.RLock() + defer s.mutex.RUnlock() + + return b.unlockedLanguages() +} + +func (b *Builder) unlockedLanguages() []language.Tag { + s := &b.index + if len(s.index) == 0 { + return nil + } + tags := make([]language.Tag, 0, len(s.index)) + _, hasFallback := s.index[b.options.fallback] + offset := 0 + if hasFallback { + tags = append(tags, b.options.fallback) + offset = 1 + } + for t := range s.index { + if t != b.options.fallback { + tags = append(tags, t) + } + } + internal.SortTags(tags[offset:]) + return tags +} diff --git a/vendor/golang.org/x/text/message/catalog/go19.go b/vendor/golang.org/x/text/message/catalog/go19.go new file mode 100644 index 00000000..291a4df9 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/go19.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.9 + +package catalog + +import "golang.org/x/text/internal/catmsg" + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message = catmsg.Message + +type firstInSequence = catmsg.FirstOf diff --git a/vendor/golang.org/x/text/message/catalog/gopre19.go b/vendor/golang.org/x/text/message/catalog/gopre19.go new file mode 100644 index 00000000..da44ebb8 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/gopre19.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.9 + +package catalog + +import "golang.org/x/text/internal/catmsg" + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. 
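+// Message is declared as an interface rather than a type alias because type
+// aliases are not available before Go 1.9 (this file is constrained to
+// !go1.9 builds); go19.go declares Message = catmsg.Message directly.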
+type Message interface {
+	catmsg.Message
+}
+
+func firstInSequence(m []Message) catmsg.Message {
+	a := []catmsg.Message{}
+	for _, m := range m {
+		a = append(a, m)
+	}
+	return catmsg.FirstOf(a)
+}
diff --git a/vendor/golang.org/x/text/message/doc.go b/vendor/golang.org/x/text/message/doc.go
new file mode 100644
index 00000000..4bf7bdca
--- /dev/null
+++ b/vendor/golang.org/x/text/message/doc.go
@@ -0,0 +1,99 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package message implements formatted I/O for localized strings with functions
+// analogous to fmt's print functions. It is a drop-in replacement for fmt.
+//
+// # Localized Formatting
+//
+// A format string can be localized by replacing any of the print functions of
+// fmt with an equivalent call to a Printer.
+//
+//	p := message.NewPrinter(message.MatchLanguage("en"))
+//	p.Println(123456.78) // Prints 123,456.78
+//
+//	p.Printf("%d ducks in a row", 4331) // Prints 4,331 ducks in a row
+//
+//	p := message.NewPrinter(message.MatchLanguage("nl"))
+//	p.Printf("Hoogte: %.1f meter", 1244.9) // Prints Hoogte: 1.244,9 meter
+//
+//	p := message.NewPrinter(message.MatchLanguage("bn"))
+//	p.Println(123456.78) // Prints ১,২৩,৪৫৬.৭৮
+//
+// Printer currently supports numbers and specialized types for which packages
+// exist in x/text. Other builtin types such as time.Time and slices are
+// planned.
+//
+// Format strings largely have the same meaning as with fmt with the following
+// notable exceptions:
+//   - flag # always resorts to fmt for printing
+//   - verb 'f', 'e', 'g', 'd' use localized formatting unless the '#' flag is
+//     specified.
+//   - verb 'm' inserts a translation of a string argument.
+//
+// See package fmt for more options.
+//
+// # Translation
+//
+// The format strings that are passed to Printf, Sprintf, Fprintf, or Errorf
+// are used as keys to look up translations for the specified languages.
+// More on how these need to be specified below.
+//
+// One can use arbitrary keys to distinguish between otherwise ambiguous
+// strings:
+//
+//	p := message.NewPrinter(language.English)
+//	p.Printf("archive(noun)") // Prints "archive"
+//	p.Printf("archive(verb)") // Prints "archive"
+//
+//	p := message.NewPrinter(language.German)
+//	p.Printf("archive(noun)") // Prints "Archiv"
+//	p.Printf("archive(verb)") // Prints "archivieren"
+//
+// To retain the fallback functionality, use Key:
+//
+//	p.Printf(message.Key("archive(noun)", "archive"))
+//	p.Printf(message.Key("archive(verb)", "archive"))
+//
+// # Translation Pipeline
+//
+// Format strings that contain text need to be translated to support different
+// locales. The first step is to extract strings that need to be translated.
+//
+// 1. Install gotext
+//
+//	go get -u golang.org/x/text/cmd/gotext
+//	gotext -help
+//
+// 2. Mark strings in your source to be translated by using message.Printer,
+// instead of the functions of the fmt package.
+//
+// 3. Extract the strings from your source
+//
+//	gotext extract
+//
+// The output will be written to the textdata directory.
+//
+// 4. Send the files for translation
+//
+// It is planned to support multiple formats, but for now one will have to
+// rewrite the JSON output to the desired format.
+//
+// 5. Inject translations into program
+//
+// 6. Repeat from 2
+//
+// Right now this has to be done programmatically with calls to Set or
+// SetString.
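+//
+// For instance (a hedged sketch; SetString installs the translation in
+// DefaultCatalog, which NewPrinter consults unless a Catalog option
+// overrides it):
+//
+//	_ = message.SetString(language.Dutch, "%d ducks in a row",
+//		"%d eenden op een rij")
+//	p := message.NewPrinter(language.Dutch)
+//	p.Printf("%d ducks in a row", 4331) // Prints 4.331 eenden op een rij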
+// These functions, as well as the methods defined in
+// package golang.org/x/text/message/catalog, can be used to implement
+// either dynamic or static loading of messages.
+//
+// # Plural and Gender Forms
+//
+// Translated messages can vary based on the plural and gender forms of
+// substitution values. In general, it is up to the translators to provide
+// alternative translations for such forms. See the packages in
+// golang.org/x/text/feature and golang.org/x/text/message/catalog for more
+// information.
+package message
diff --git a/vendor/golang.org/x/text/message/format.go b/vendor/golang.org/x/text/message/format.go
new file mode 100644
index 00000000..a47d17dd
--- /dev/null
+++ b/vendor/golang.org/x/text/message/format.go
@@ -0,0 +1,510 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"strconv"
+	"unicode/utf8"
+
+	"golang.org/x/text/internal/format"
+)
+
+const (
+	ldigits = "0123456789abcdefx"
+	udigits = "0123456789ABCDEFX"
+)
+
+const (
+	signed   = true
+	unsigned = false
+)
+
+// A formatInfo is the raw formatter used by Printf etc.
+// It prints into a buffer that must be set up separately.
+type formatInfo struct {
+	buf *bytes.Buffer
+
+	format.Parser
+
+	// intbuf is large enough to store %b of an int64 with a sign and
+	// avoids padding at the end of the struct on 32 bit architectures.
+	intbuf [68]byte
+}
+
+func (f *formatInfo) init(buf *bytes.Buffer) {
+	f.ClearFlags()
+	f.buf = buf
+}
+
+// writePadding generates n bytes of padding.
+func (f *formatInfo) writePadding(n int) {
+	if n <= 0 { // No padding bytes needed.
+		return
+	}
+	f.buf.Grow(n)
+	// Decide which byte the padding should be filled with.
+	padByte := byte(' ')
+	if f.Zero {
+		padByte = byte('0')
+	}
+	// Fill padding with padByte.
+	for i := 0; i < n; i++ {
+		f.buf.WriteByte(padByte) // TODO: make more efficient.
+	}
+}
+
+// pad appends b to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) pad(b []byte) {
+	if !f.WidthPresent || f.Width == 0 {
+		f.buf.Write(b)
+		return
+	}
+	width := f.Width - utf8.RuneCount(b)
+	if !f.Minus {
+		// left padding
+		f.writePadding(width)
+		f.buf.Write(b)
+	} else {
+		// right padding
+		f.buf.Write(b)
+		f.writePadding(width)
+	}
+}
+
+// padString appends s to f.buf, padded on left (!f.minus) or right (f.minus).
+func (f *formatInfo) padString(s string) {
+	if !f.WidthPresent || f.Width == 0 {
+		f.buf.WriteString(s)
+		return
+	}
+	width := f.Width - utf8.RuneCountInString(s)
+	if !f.Minus {
+		// left padding
+		f.writePadding(width)
+		f.buf.WriteString(s)
+	} else {
+		// right padding
+		f.buf.WriteString(s)
+		f.writePadding(width)
+	}
+}
+
+// fmt_boolean formats a boolean.
+func (f *formatInfo) fmt_boolean(v bool) {
+	if v {
+		f.padString("true")
+	} else {
+		f.padString("false")
+	}
+}
+
+// fmt_unicode formats a uint64 as "U+0078" or with f.sharp set as "U+0078 'x'".
+func (f *formatInfo) fmt_unicode(u uint64) {
+	buf := f.intbuf[0:]
+
+	// With default precision set the maximum needed buf length is 18
+	// for formatting -1 with %#U ("U+FFFFFFFFFFFFFFFF") which fits
+	// into the already allocated intbuf with a capacity of 68 bytes.
+	prec := 4
+	if f.PrecPresent && f.Prec > 4 {
+		prec = f.Prec
+		// Compute space needed for "U+", number, " '", character, "'".
+ width := 2 + prec + 2 + utf8.UTFMax + 1 + if width > len(buf) { + buf = make([]byte, width) + } + } + + // Format into buf, ending at buf[i]. Formatting numbers is easier right-to-left. + i := len(buf) + + // For %#U we want to add a space and a quoted character at the end of the buffer. + if f.Sharp && u <= utf8.MaxRune && strconv.IsPrint(rune(u)) { + i-- + buf[i] = '\'' + i -= utf8.RuneLen(rune(u)) + utf8.EncodeRune(buf[i:], rune(u)) + i-- + buf[i] = '\'' + i-- + buf[i] = ' ' + } + // Format the Unicode code point u as a hexadecimal number. + for u >= 16 { + i-- + buf[i] = udigits[u&0xF] + prec-- + u >>= 4 + } + i-- + buf[i] = udigits[u] + prec-- + // Add zeros in front of the number until requested precision is reached. + for prec > 0 { + i-- + buf[i] = '0' + prec-- + } + // Add a leading "U+". + i-- + buf[i] = '+' + i-- + buf[i] = 'U' + + oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// fmt_integer formats signed and unsigned integers. +func (f *formatInfo) fmt_integer(u uint64, base int, isSigned bool, digits string) { + negative := isSigned && int64(u) < 0 + if negative { + u = -u + } + + buf := f.intbuf[0:] + // The already allocated f.intbuf with a capacity of 68 bytes + // is large enough for integer formatting when no precision or width is set. + if f.WidthPresent || f.PrecPresent { + // Account 3 extra bytes for possible addition of a sign and "0x". + width := 3 + f.Width + f.Prec // wid and prec are always positive. + if width > len(buf) { + // We're going to need a bigger boat. + buf = make([]byte, width) + } + } + + // Two ways to ask for extra leading zero digits: %.3d or %03d. + // If both are specified the f.zero flag is ignored and + // padding with spaces is used instead. + prec := 0 + if f.PrecPresent { + prec = f.Prec + // Precision of 0 and value of 0 means "print nothing" but padding. + if prec == 0 && u == 0 { + oldZero := f.Zero + f.Zero = false + f.writePadding(f.Width) + f.Zero = oldZero + return + } + } else if f.Zero && f.WidthPresent { + prec = f.Width + if negative || f.Plus || f.Space { + prec-- // leave room for sign + } + } + + // Because printing is easier right-to-left: format u into buf, ending at buf[i]. + // We could make things marginally faster by splitting the 32-bit case out + // into a separate block but it's not worth the duplication, so u has 64 bits. + i := len(buf) + // Use constants for the division and modulo for more efficient code. + // Switch cases ordered by popularity. + switch base { + case 10: + for u >= 10 { + i-- + next := u / 10 + buf[i] = byte('0' + u - next*10) + u = next + } + case 16: + for u >= 16 { + i-- + buf[i] = digits[u&0xF] + u >>= 4 + } + case 8: + for u >= 8 { + i-- + buf[i] = byte('0' + u&7) + u >>= 3 + } + case 2: + for u >= 2 { + i-- + buf[i] = byte('0' + u&1) + u >>= 1 + } + default: + panic("fmt: unknown base; can't happen") + } + i-- + buf[i] = digits[u] + for i > 0 && prec > len(buf)-i { + i-- + buf[i] = '0' + } + + // Various prefixes: 0x, -, etc. + if f.Sharp { + switch base { + case 8: + if buf[i] != '0' { + i-- + buf[i] = '0' + } + case 16: + // Add a leading 0x or 0X. + i-- + buf[i] = digits[16] + i-- + buf[i] = '0' + } + } + + if negative { + i-- + buf[i] = '-' + } else if f.Plus { + i-- + buf[i] = '+' + } else if f.Space { + i-- + buf[i] = ' ' + } + + // Left padding with zeros has already been handled like precision earlier + // or the f.zero flag is ignored due to an explicitly set precision. 
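+	// For instance (illustrative): "%.5d" applied to 42 yields "00042",
+	// while "%8.5d" space-pads the zero-extended body to "   00042".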
+ oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// truncate truncates the string to the specified precision, if present. +func (f *formatInfo) truncate(s string) string { + if f.PrecPresent { + n := f.Prec + for i := range s { + n-- + if n < 0 { + return s[:i] + } + } + } + return s +} + +// fmt_s formats a string. +func (f *formatInfo) fmt_s(s string) { + s = f.truncate(s) + f.padString(s) +} + +// fmt_sbx formats a string or byte slice as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sbx(s string, b []byte, digits string) { + length := len(b) + if b == nil { + // No byte slice present. Assume string s should be encoded. + length = len(s) + } + // Set length to not process more bytes than the precision demands. + if f.PrecPresent && f.Prec < length { + length = f.Prec + } + // Compute width of the encoding taking into account the f.sharp and f.space flag. + width := 2 * length + if width > 0 { + if f.Space { + // Each element encoded by two hexadecimals will get a leading 0x or 0X. + if f.Sharp { + width *= 2 + } + // Elements will be separated by a space. + width += length - 1 + } else if f.Sharp { + // Only a leading 0x or 0X will be added for the whole string. + width += 2 + } + } else { // The byte slice or string that should be encoded is empty. + if f.WidthPresent { + f.writePadding(f.Width) + } + return + } + // Handle padding to the left. + if f.WidthPresent && f.Width > width && !f.Minus { + f.writePadding(f.Width - width) + } + // Write the encoding directly into the output buffer. + buf := f.buf + if f.Sharp { + // Add leading 0x or 0X. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + var c byte + for i := 0; i < length; i++ { + if f.Space && i > 0 { + // Separate elements with a space. + buf.WriteByte(' ') + if f.Sharp { + // Add leading 0x or 0X for each element. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + } + if b != nil { + c = b[i] // Take a byte from the input byte slice. + } else { + c = s[i] // Take a byte from the input string. + } + // Encode each byte as two hexadecimal digits. + buf.WriteByte(digits[c>>4]) + buf.WriteByte(digits[c&0xF]) + } + // Handle padding to the right. + if f.WidthPresent && f.Width > width && f.Minus { + f.writePadding(f.Width - width) + } +} + +// fmt_sx formats a string as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sx(s, digits string) { + f.fmt_sbx(s, nil, digits) +} + +// fmt_bx formats a byte slice as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_bx(b []byte, digits string) { + f.fmt_sbx("", b, digits) +} + +// fmt_q formats a string as a double-quoted, escaped Go string constant. +// If f.sharp is set a raw (backquoted) string may be returned instead +// if the string does not contain any control characters other than tab. +func (f *formatInfo) fmt_q(s string) { + s = f.truncate(s) + if f.Sharp && strconv.CanBackquote(s) { + f.padString("`" + s + "`") + return + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteToASCII(buf, s)) + } else { + f.pad(strconv.AppendQuote(buf, s)) + } +} + +// fmt_c formats an integer as a Unicode character. +// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_c(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + w := utf8.EncodeRune(buf[:utf8.UTFMax], r) + f.pad(buf[:w]) +} + +// fmt_qc formats an integer as a single-quoted, escaped Go character constant. 
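+// For example (illustrative):
+//
+//	f.fmt_qc('A')  // appends 'A'
+//	f.fmt_qc('ä')  // appends 'ä', or '\u00e4' when the plus flag is set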
+// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_qc(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteRuneToASCII(buf, r)) + } else { + f.pad(strconv.AppendQuoteRune(buf, r)) + } +} + +// fmt_float formats a float64. It assumes that verb is a valid format specifier +// for strconv.AppendFloat and therefore fits into a byte. +func (f *formatInfo) fmt_float(v float64, size int, verb rune, prec int) { + // Explicit precision in format specifier overrules default precision. + if f.PrecPresent { + prec = f.Prec + } + // Format number, reserving space for leading + sign if needed. + num := strconv.AppendFloat(f.intbuf[:1], v, byte(verb), prec, size) + if num[1] == '-' || num[1] == '+' { + num = num[1:] + } else { + num[0] = '+' + } + // f.space means to add a leading space instead of a "+" sign unless + // the sign is explicitly asked for by f.plus. + if f.Space && num[0] == '+' && !f.Plus { + num[0] = ' ' + } + // Special handling for infinities and NaN, + // which don't look like a number so shouldn't be padded with zeros. + if num[1] == 'I' || num[1] == 'N' { + oldZero := f.Zero + f.Zero = false + // Remove sign before NaN if not asked for. + if num[1] == 'N' && !f.Space && !f.Plus { + num = num[1:] + } + f.pad(num) + f.Zero = oldZero + return + } + // The sharp flag forces printing a decimal point for non-binary formats + // and retains trailing zeros, which we may need to restore. + if f.Sharp && verb != 'b' { + digits := 0 + switch verb { + case 'v', 'g', 'G': + digits = prec + // If no precision is set explicitly use a precision of 6. + if digits == -1 { + digits = 6 + } + } + + // Buffer pre-allocated with enough room for + // exponent notations of the form "e+123". + var tailBuf [5]byte + tail := tailBuf[:0] + + hasDecimalPoint := false + // Starting from i = 1 to skip sign at num[0]. + for i := 1; i < len(num); i++ { + switch num[i] { + case '.': + hasDecimalPoint = true + case 'e', 'E': + tail = append(tail, num[i:]...) + num = num[:i] + default: + digits-- + } + } + if !hasDecimalPoint { + num = append(num, '.') + } + for digits > 0 { + num = append(num, '0') + digits-- + } + num = append(num, tail...) + } + // We want a sign if asked for and if the sign is not positive. + if f.Plus || num[0] != '+' { + // If we're zero padding to the left we want the sign before the leading zeros. + // Achieve this by writing the sign out and then padding the unsigned number. + if f.Zero && f.WidthPresent && f.Width > len(num) { + f.buf.WriteByte(num[0]) + f.writePadding(f.Width - len(num)) + f.buf.Write(num[1:]) + return + } + f.pad(num) + return + } + // No sign to show and the number is positive; just print the unsigned number. + f.pad(num[1:]) +} diff --git a/vendor/golang.org/x/text/message/message.go b/vendor/golang.org/x/text/message/message.go new file mode 100644 index 00000000..91a97264 --- /dev/null +++ b/vendor/golang.org/x/text/message/message.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message // import "golang.org/x/text/message" + +import ( + "io" + "os" + + // Include features to facilitate generated catalogs. 
+ _ "golang.org/x/text/feature/plural" + + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// A Printer implements language-specific formatted I/O analogous to the fmt +// package. +type Printer struct { + // the language + tag language.Tag + + toDecimal number.Formatter + toScientific number.Formatter + + cat catalog.Catalog +} + +type options struct { + cat catalog.Catalog + // TODO: + // - allow %s to print integers in written form (tables are likely too large + // to enable this by default). + // - list behavior + // +} + +// An Option defines an option of a Printer. +type Option func(o *options) + +// Catalog defines the catalog to be used. +func Catalog(c catalog.Catalog) Option { + return func(o *options) { o.cat = c } +} + +// NewPrinter returns a Printer that formats messages tailored to language t. +func NewPrinter(t language.Tag, opts ...Option) *Printer { + options := &options{ + cat: DefaultCatalog, + } + for _, o := range opts { + o(options) + } + p := &Printer{ + tag: t, + cat: options.cat, + } + p.toDecimal.InitDecimal(t) + p.toScientific.InitScientific(t) + return p +} + +// Sprint is like fmt.Sprint, but using language-specific formatting. +func (p *Printer) Sprint(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrint(a) + s := pp.String() + pp.free() + return s +} + +// Fprint is like fmt.Fprint, but using language-specific formatting. +func (p *Printer) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrint(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Print is like fmt.Print, but using language-specific formatting. +func (p *Printer) Print(a ...interface{}) (n int, err error) { + return p.Fprint(os.Stdout, a...) +} + +// Sprintln is like fmt.Sprintln, but using language-specific formatting. +func (p *Printer) Sprintln(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrintln(a) + s := pp.String() + pp.free() + return s +} + +// Fprintln is like fmt.Fprintln, but using language-specific formatting. +func (p *Printer) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrintln(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Println is like fmt.Println, but using language-specific formatting. +func (p *Printer) Println(a ...interface{}) (n int, err error) { + return p.Fprintln(os.Stdout, a...) +} + +// Sprintf is like fmt.Sprintf, but using language-specific formatting. +func (p *Printer) Sprintf(key Reference, a ...interface{}) string { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + s := pp.String() + pp.free() + return s +} + +// Fprintf is like fmt.Fprintf, but using language-specific formatting. +func (p *Printer) Fprintf(w io.Writer, key Reference, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + n, err = w.Write(pp.Bytes()) + pp.free() + return n, err + +} + +// Printf is like fmt.Printf, but using language-specific formatting. 
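+//
+// A hedged usage sketch (it assumes the key has no catalog entry, so the key
+// text itself serves as the format string):
+//
+//	p := message.NewPrinter(language.English)
+//	p.Printf("%d tasks", 2) // prints "2 tasks"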
+func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) {
+	pp := newPrinter(p)
+	lookupAndFormat(pp, key, a)
+	n, err = os.Stdout.Write(pp.Bytes())
+	pp.free()
+	return n, err
+}
+
+func lookupAndFormat(p *printer, r Reference, a []interface{}) {
+	p.fmt.Reset(a)
+	switch v := r.(type) {
+	case string:
+		if p.catContext.Execute(v) == catalog.ErrNotFound {
+			p.Render(v)
+			return
+		}
+	case key:
+		if p.catContext.Execute(v.id) == catalog.ErrNotFound &&
+			p.catContext.Execute(v.fallback) == catalog.ErrNotFound {
+			p.Render(v.fallback)
+			return
+		}
+	default:
+		panic("key argument is not a Reference")
+	}
+}
+
+type rawPrinter struct {
+	p *printer
+}
+
+func (p rawPrinter) Render(msg string)     { p.p.WriteString(msg) }
+func (p rawPrinter) Arg(i int) interface{} { return nil }
+
+// Arg implements catmsg.Renderer.
+func (p *printer) Arg(i int) interface{} { // TODO, also return "ok" bool
+	i--
+	if uint(i) < uint(len(p.fmt.Args)) {
+		return p.fmt.Args[i]
+	}
+	return nil
+}
+
+// Render implements catmsg.Renderer.
+func (p *printer) Render(msg string) {
+	p.doPrintf(msg)
+}
+
+// A Reference is a string or a message reference.
+type Reference interface {
+	// TODO: also allow []string
+}
+
+// Key creates a message Reference for a message where the given id is used for
+// message lookup and the fallback is returned when no matches are found.
+func Key(id string, fallback string) Reference {
+	return key{id, fallback}
+}
+
+type key struct {
+	id, fallback string
+}
diff --git a/vendor/golang.org/x/text/message/print.go b/vendor/golang.org/x/text/message/print.go
new file mode 100644
index 00000000..da304cc0
--- /dev/null
+++ b/vendor/golang.org/x/text/message/print.go
@@ -0,0 +1,984 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package message
+
+import (
+	"bytes"
+	"fmt" // TODO: consider copying interfaces from package fmt to avoid dependency.
+	"math"
+	"reflect"
+	"sync"
+	"unicode/utf8"
+
+	"golang.org/x/text/internal/format"
+	"golang.org/x/text/internal/number"
+	"golang.org/x/text/language"
+	"golang.org/x/text/message/catalog"
+)
+
+// Strings for use with buffer.WriteString.
+// This is less overhead than using buffer.Write with byte arrays.
+const (
+	commaSpaceString  = ", "
+	nilAngleString    = "<nil>"
+	nilParenString    = "(nil)"
+	nilString         = "nil"
+	mapString         = "map["
+	percentBangString = "%!"
+	missingString     = "(MISSING)"
+	badIndexString    = "(BADINDEX)"
+	panicString       = "(PANIC="
+	extraString       = "%!(EXTRA "
+	badWidthString    = "%!(BADWIDTH)"
+	badPrecString     = "%!(BADPREC)"
+	noVerbString      = "%!(NOVERB)"
+
+	invReflectString = "<invalid reflect.Value>"
+)
+
+var printerPool = sync.Pool{
+	New: func() interface{} { return new(printer) },
+}
+
+// newPrinter allocates a new printer struct or grabs a cached one.
+func newPrinter(pp *Printer) *printer {
+	p := printerPool.Get().(*printer)
+	p.Printer = *pp
+	// TODO: cache most of the following call.
+	p.catContext = pp.cat.Context(pp.tag, p)
+
+	p.panicking = false
+	p.erroring = false
+	p.fmt.init(&p.Buffer)
+	return p
+}
+
+// free saves used printer structs in printerPool; avoids an allocation per invocation.
+func (p *printer) free() {
+	p.Buffer.Reset()
+	p.arg = nil
+	p.value = reflect.Value{}
+	printerPool.Put(p)
+}
+
+// printer is used to store a printer's state.
+// It implements "golang.org/x/text/internal/format".State.
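+//
+// printer values are pooled; the intended lifecycle, as driven by the
+// exported Sprint/Fprintf-style functions (an illustrative trace, not
+// additional API):
+//
+//	pp := newPrinter(p)
+//	pp.doPrint(args)
+//	s := pp.String()
+//	pp.free()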
+type printer struct { + Printer + + // the context for looking up message translations + catContext *catalog.Context + + // buffer for accumulating output. + bytes.Buffer + + // arg holds the current item, as an interface{}. + arg interface{} + // value is used instead of arg for reflect values. + value reflect.Value + + // fmt is used to format basic items such as integers or strings. + fmt formatInfo + + // panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion. + panicking bool + // erroring is set when printing an error string to guard against calling handleMethods. + erroring bool +} + +// Language implements "golang.org/x/text/internal/format".State. +func (p *printer) Language() language.Tag { return p.tag } + +func (p *printer) Width() (wid int, ok bool) { return p.fmt.Width, p.fmt.WidthPresent } + +func (p *printer) Precision() (prec int, ok bool) { return p.fmt.Prec, p.fmt.PrecPresent } + +func (p *printer) Flag(b int) bool { + switch b { + case '-': + return p.fmt.Minus + case '+': + return p.fmt.Plus || p.fmt.PlusV + case '#': + return p.fmt.Sharp || p.fmt.SharpV + case ' ': + return p.fmt.Space + case '0': + return p.fmt.Zero + } + return false +} + +// getField gets the i'th field of the struct value. +// If the field is itself is an interface, return a value for +// the thing inside the interface, not the interface itself. +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} + +func (p *printer) unknownType(v reflect.Value) { + if !v.IsValid() { + p.WriteString(nilAngleString) + return + } + p.WriteByte('?') + p.WriteString(v.Type().String()) + p.WriteByte('?') +} + +func (p *printer) badVerb(verb rune) { + p.erroring = true + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteByte('(') + switch { + case p.arg != nil: + p.WriteString(reflect.TypeOf(p.arg).String()) + p.WriteByte('=') + p.printArg(p.arg, 'v') + case p.value.IsValid(): + p.WriteString(p.value.Type().String()) + p.WriteByte('=') + p.printValue(p.value, 'v', 0) + default: + p.WriteString(nilAngleString) + } + p.WriteByte(')') + p.erroring = false +} + +func (p *printer) fmtBool(v bool, verb rune) { + switch verb { + case 't', 'v': + p.fmt.fmt_boolean(v) + default: + p.badVerb(verb) + } +} + +// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or +// not, as requested, by temporarily setting the sharp flag. +func (p *printer) fmt0x64(v uint64, leading0x bool) { + sharp := p.fmt.Sharp + p.fmt.Sharp = leading0x + p.fmt.fmt_integer(v, 16, unsigned, ldigits) + p.fmt.Sharp = sharp +} + +// fmtInteger formats a signed or unsigned integer. +func (p *printer) fmtInteger(v uint64, isSigned bool, verb rune) { + switch verb { + case 'v': + if p.fmt.SharpV && !isSigned { + p.fmt0x64(v, true) + return + } + fallthrough + case 'd': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_integer(v, 10, isSigned, ldigits) + } else { + p.fmtDecimalInt(v, isSigned) + } + case 'b': + p.fmt.fmt_integer(v, 2, isSigned, ldigits) + case 'o': + p.fmt.fmt_integer(v, 8, isSigned, ldigits) + case 'x': + p.fmt.fmt_integer(v, 16, isSigned, ldigits) + case 'X': + p.fmt.fmt_integer(v, 16, isSigned, udigits) + case 'c': + p.fmt.fmt_c(v) + case 'q': + if v <= utf8.MaxRune { + p.fmt.fmt_qc(v) + } else { + p.badVerb(verb) + } + case 'U': + p.fmt.fmt_unicode(v) + default: + p.badVerb(verb) + } +} + +// fmtFloat formats a float. 
The default precision for each verb +// is specified as last argument in the call to fmt_float. +func (p *printer) fmtFloat(v float64, size int, verb rune) { + switch verb { + case 'b': + p.fmt.fmt_float(v, size, verb, -1) + case 'v': + verb = 'g' + fallthrough + case 'g', 'G': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, -1) + } else { + p.fmtVariableFloat(v, size) + } + case 'e', 'E': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtScientific(v, size, 6) + } + case 'f', 'F': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtDecimalFloat(v, size, 6) + } + default: + p.badVerb(verb) + } +} + +func (p *printer) setFlags(f *number.Formatter) { + f.Flags &^= number.ElideSign + if p.fmt.Plus || p.fmt.Space { + f.Flags |= number.AlwaysSign + if !p.fmt.Plus { + f.Flags |= number.ElideSign + } + } else { + f.Flags &^= number.AlwaysSign + } +} + +func (p *printer) updatePadding(f *number.Formatter) { + f.Flags &^= number.PadMask + if p.fmt.Minus { + f.Flags |= number.PadAfterSuffix + } else { + f.Flags |= number.PadBeforePrefix + } + f.PadRune = ' ' + f.FormatWidth = uint16(p.fmt.Width) +} + +func (p *printer) initDecimal(minFrac, maxFrac int) { + f := &p.toDecimal + f.MinIntegerDigits = 1 + f.MaxIntegerDigits = 0 + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + if p.fmt.Zero { + wid := p.fmt.Width + // Use significant integers for this. + // TODO: this is not the same as width, but so be it. + if f.MinFractionDigits > 0 { + wid -= 1 + int(f.MinFractionDigits) + } + if p.fmt.Plus || p.fmt.Space { + wid-- + } + if wid > 0 && wid > int(f.MinIntegerDigits) { + f.MinIntegerDigits = uint8(wid) + } + } + p.updatePadding(f) + } +} + +func (p *printer) initScientific(minFrac, maxFrac int) { + f := &p.toScientific + if maxFrac < 0 { + f.SetPrecision(maxFrac) + } else { + f.SetPrecision(maxFrac + 1) + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + } + f.MinExponentDigits = 2 + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + f.Flags &^= number.PadMask + if p.fmt.Zero { + f.PadRune = f.Digit(0) + f.Flags |= number.PadAfterPrefix + } else { + f.PadRune = ' ' + f.Flags |= number.PadBeforePrefix + } + p.updatePadding(f) + } +} + +func (p *printer) fmtDecimalInt(v uint64, isSigned bool) { + var d number.Decimal + + f := &p.toDecimal + if p.fmt.PrecPresent { + p.setFlags(f) + f.MinIntegerDigits = uint8(p.fmt.Prec) + f.MaxIntegerDigits = 0 + f.MinFractionDigits = 0 + f.MaxFractionDigits = 0 + if p.fmt.WidthPresent { + p.updatePadding(f) + } + } else { + p.initDecimal(0, 0) + } + d.ConvertInt(p.toDecimal.RoundingContext, isSigned, v) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtDecimalFloat(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initDecimal(prec, prec) + d.ConvertFloat(p.toDecimal.RoundingContext, v, size) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtVariableFloat(v float64, size int) { + prec := -1 + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + var d number.Decimal + p.initScientific(0, prec) + d.ConvertFloat(p.toScientific.RoundingContext, v, size) + + // Copy logic of 'g' formatting from strconv. It is simplified a bit as + // we don't have to mind having prec > len(d.Digits). 
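+	// Illustrative of the exponent test below: 0.0001234 stays decimal
+	// ("0.0001234"), while 0.00001234 switches to scientific ("1.234e-05").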
+ shortest := prec < 0 + ePrec := prec + if shortest { + prec = len(d.Digits) + ePrec = 6 + } else if prec == 0 { + prec = 1 + ePrec = 1 + } + exp := int(d.Exp) - 1 + if exp < -4 || exp >= ePrec { + p.initScientific(0, prec) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + } else { + if prec > int(d.Exp) { + prec = len(d.Digits) + } + if prec -= int(d.Exp); prec < 0 { + prec = 0 + } + p.initDecimal(0, prec) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) + } +} + +func (p *printer) fmtScientific(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initScientific(prec, prec) + rc := p.toScientific.RoundingContext + d.ConvertFloat(rc, v, size) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + +} + +// fmtComplex formats a complex number v with +// r = real(v) and j = imag(v) as (r+ji) using +// fmtFloat for r and j formatting. +func (p *printer) fmtComplex(v complex128, size int, verb rune) { + // Make sure any unsupported verbs are found before the + // calls to fmtFloat to not generate an incorrect error string. + switch verb { + case 'v', 'b', 'g', 'G', 'f', 'F', 'e', 'E': + p.WriteByte('(') + p.fmtFloat(real(v), size/2, verb) + // Imaginary part always has a sign. + if math.IsNaN(imag(v)) { + // By CLDR's rules, NaNs do not use patterns or signs. As this code + // relies on AlwaysSign working for imaginary parts, we need to + // manually handle NaNs. + f := &p.toScientific + p.setFlags(f) + p.updatePadding(f) + p.setFlags(f) + nan := f.Symbol(number.SymNan) + extra := 0 + if w, ok := p.Width(); ok { + extra = w - utf8.RuneCountInString(nan) - 1 + } + if f.Flags&number.PadAfterNumber == 0 { + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + } + p.WriteString(f.Symbol(number.SymPlusSign)) + p.WriteString(nan) + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + p.WriteString("i)") + return + } + oldPlus := p.fmt.Plus + p.fmt.Plus = true + p.fmtFloat(imag(v), size/2, verb) + p.WriteString("i)") // TODO: use symbol? 
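+	// Illustrative: %v of complex(1, -2) renders "(1-2i)"; the forced
+	// Plus flag above guarantees an explicit sign on the imaginary part.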
+	p.fmt.Plus = oldPlus
+	default:
+		p.badVerb(verb)
+	}
+}
+
+func (p *printer) fmtString(v string, verb rune) {
+	switch verb {
+	case 'v':
+		if p.fmt.SharpV {
+			p.fmt.fmt_q(v)
+		} else {
+			p.fmt.fmt_s(v)
+		}
+	case 's':
+		p.fmt.fmt_s(v)
+	case 'x':
+		p.fmt.fmt_sx(v, ldigits)
+	case 'X':
+		p.fmt.fmt_sx(v, udigits)
+	case 'q':
+		p.fmt.fmt_q(v)
+	case 'm':
+		ctx := p.cat.Context(p.tag, rawPrinter{p})
+		if ctx.Execute(v) == catalog.ErrNotFound {
+			p.WriteString(v)
+		}
+	default:
+		p.badVerb(verb)
+	}
+}
+
+func (p *printer) fmtBytes(v []byte, verb rune, typeString string) {
+	switch verb {
+	case 'v', 'd':
+		if p.fmt.SharpV {
+			p.WriteString(typeString)
+			if v == nil {
+				p.WriteString(nilParenString)
+				return
+			}
+			p.WriteByte('{')
+			for i, c := range v {
+				if i > 0 {
+					p.WriteString(commaSpaceString)
+				}
+				p.fmt0x64(uint64(c), true)
+			}
+			p.WriteByte('}')
+		} else {
+			p.WriteByte('[')
+			for i, c := range v {
+				if i > 0 {
+					p.WriteByte(' ')
+				}
+				p.fmt.fmt_integer(uint64(c), 10, unsigned, ldigits)
+			}
+			p.WriteByte(']')
+		}
+	case 's':
+		p.fmt.fmt_s(string(v))
+	case 'x':
+		p.fmt.fmt_bx(v, ldigits)
+	case 'X':
+		p.fmt.fmt_bx(v, udigits)
+	case 'q':
+		p.fmt.fmt_q(string(v))
+	default:
+		p.printValue(reflect.ValueOf(v), verb, 0)
+	}
+}
+
+func (p *printer) fmtPointer(value reflect.Value, verb rune) {
+	var u uintptr
+	switch value.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+		u = value.Pointer()
+	default:
+		p.badVerb(verb)
+		return
+	}
+
+	switch verb {
+	case 'v':
+		if p.fmt.SharpV {
+			p.WriteByte('(')
+			p.WriteString(value.Type().String())
+			p.WriteString(")(")
+			if u == 0 {
+				p.WriteString(nilString)
+			} else {
+				p.fmt0x64(uint64(u), true)
+			}
+			p.WriteByte(')')
+		} else {
+			if u == 0 {
+				p.fmt.padString(nilAngleString)
+			} else {
+				p.fmt0x64(uint64(u), !p.fmt.Sharp)
+			}
+		}
+	case 'p':
+		p.fmt0x64(uint64(u), !p.fmt.Sharp)
+	case 'b', 'o', 'd', 'x', 'X':
+		if verb == 'd' {
+			p.fmt.Sharp = true // Print as standard go. TODO: does this make sense?
+		}
+		p.fmtInteger(uint64(u), unsigned, verb)
+	default:
+		p.badVerb(verb)
+	}
+}
+
+func (p *printer) catchPanic(arg interface{}, verb rune) {
+	if err := recover(); err != nil {
+		// If it's a nil pointer, just say "<nil>". The likeliest causes are a
+		// Stringer that fails to guard against nil or a nil pointer for a
+		// value receiver, and in either case, "<nil>" is a nice result.
+		if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() {
+			p.WriteString(nilAngleString)
+			return
+		}
+		// Otherwise print a concise panic message. Most of the time the panic
+		// value will print itself nicely.
+		if p.panicking {
+			// Nested panics; the recursion in printArg cannot succeed.
+			panic(err)
+		}
+
+		oldFlags := p.fmt.Parser
+		// For this output we want default behavior.
+		p.fmt.ClearFlags()
+
+		p.WriteString(percentBangString)
+		p.WriteRune(verb)
+		p.WriteString(panicString)
+		p.panicking = true
+		p.printArg(err, 'v')
+		p.panicking = false
+		p.WriteByte(')')
+
+		p.fmt.Parser = oldFlags
+	}
+}
+
+func (p *printer) handleMethods(verb rune) (handled bool) {
+	if p.erroring {
+		return
+	}
+	// Is it a Formatter?
+	if formatter, ok := p.arg.(format.Formatter); ok {
+		handled = true
+		defer p.catchPanic(p.arg, verb)
+		formatter.Format(p, verb)
+		return
+	}
+	if formatter, ok := p.arg.(fmt.Formatter); ok {
+		handled = true
+		defer p.catchPanic(p.arg, verb)
+		formatter.Format(p, verb)
+		return
+	}
+
+	// If we're doing Go syntax and the argument knows how to supply it, take care of it now.
+ if p.fmt.SharpV { + if stringer, ok := p.arg.(fmt.GoStringer); ok { + handled = true + defer p.catchPanic(p.arg, verb) + // Print the result of GoString unadorned. + p.fmt.fmt_s(stringer.GoString()) + return + } + } else { + // If a string is acceptable according to the format, see if + // the value satisfies one of the string-valued interfaces. + // Println etc. set verb to %v, which is "stringable". + switch verb { + case 'v', 's', 'x', 'X', 'q': + // Is it an error or Stringer? + // The duplication in the bodies is necessary: + // setting handled and deferring catchPanic + // must happen before calling the method. + switch v := p.arg.(type) { + case error: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.Error(), verb) + return + + case fmt.Stringer: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.String(), verb) + return + } + } + } + return false +} + +func (p *printer) printArg(arg interface{}, verb rune) { + p.arg = arg + p.value = reflect.Value{} + + if arg == nil { + switch verb { + case 'T', 'v': + p.fmt.padString(nilAngleString) + default: + p.badVerb(verb) + } + return + } + + // Special processing considerations. + // %T (the value's type) and %p (its address) are special; we always do them first. + switch verb { + case 'T': + p.fmt.fmt_s(reflect.TypeOf(arg).String()) + return + case 'p': + p.fmtPointer(reflect.ValueOf(arg), 'p') + return + } + + // Some types can be done without reflection. + switch f := arg.(type) { + case bool: + p.fmtBool(f, verb) + case float32: + p.fmtFloat(float64(f), 32, verb) + case float64: + p.fmtFloat(f, 64, verb) + case complex64: + p.fmtComplex(complex128(f), 64, verb) + case complex128: + p.fmtComplex(f, 128, verb) + case int: + p.fmtInteger(uint64(f), signed, verb) + case int8: + p.fmtInteger(uint64(f), signed, verb) + case int16: + p.fmtInteger(uint64(f), signed, verb) + case int32: + p.fmtInteger(uint64(f), signed, verb) + case int64: + p.fmtInteger(uint64(f), signed, verb) + case uint: + p.fmtInteger(uint64(f), unsigned, verb) + case uint8: + p.fmtInteger(uint64(f), unsigned, verb) + case uint16: + p.fmtInteger(uint64(f), unsigned, verb) + case uint32: + p.fmtInteger(uint64(f), unsigned, verb) + case uint64: + p.fmtInteger(f, unsigned, verb) + case uintptr: + p.fmtInteger(uint64(f), unsigned, verb) + case string: + p.fmtString(f, verb) + case []byte: + p.fmtBytes(f, verb, "[]byte") + case reflect.Value: + // Handle extractable values with special methods + // since printValue does not handle them at depth 0. + if f.IsValid() && f.CanInterface() { + p.arg = f.Interface() + if p.handleMethods(verb) { + return + } + } + p.printValue(f, verb, 0) + default: + // If the type is not simple, it might have methods. + if !p.handleMethods(verb) { + // Need to use reflection, since the type had no + // interface methods that could be used for formatting. + p.printValue(reflect.ValueOf(f), verb, 0) + } + } +} + +// printValue is similar to printArg but starts with a reflect value, not an interface{} value. +// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg. +func (p *printer) printValue(value reflect.Value, verb rune, depth int) { + // Handle values with special methods if not already handled by printArg (depth == 0). 
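+	// (Illustrative: a fmt.Stringer reached as a struct field or map value
+	// arrives here with depth > 0 and is routed through handleMethods.)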
+ if depth > 0 && value.IsValid() && value.CanInterface() { + p.arg = value.Interface() + if p.handleMethods(verb) { + return + } + } + p.arg = nil + p.value = value + + switch f := value; value.Kind() { + case reflect.Invalid: + if depth == 0 { + p.WriteString(invReflectString) + } else { + switch verb { + case 'v': + p.WriteString(nilAngleString) + default: + p.badVerb(verb) + } + } + case reflect.Bool: + p.fmtBool(f.Bool(), verb) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.fmtInteger(uint64(f.Int()), signed, verb) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.fmtInteger(f.Uint(), unsigned, verb) + case reflect.Float32: + p.fmtFloat(f.Float(), 32, verb) + case reflect.Float64: + p.fmtFloat(f.Float(), 64, verb) + case reflect.Complex64: + p.fmtComplex(f.Complex(), 64, verb) + case reflect.Complex128: + p.fmtComplex(f.Complex(), 128, verb) + case reflect.String: + p.fmtString(f.String(), verb) + case reflect.Map: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + } else { + p.WriteString(mapString) + } + keys := f.MapKeys() + for i, key := range keys { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + p.printValue(key, verb, depth+1) + p.WriteByte(':') + p.printValue(f.MapIndex(key), verb, depth+1) + } + if p.fmt.SharpV { + p.WriteByte('}') + } else { + p.WriteByte(']') + } + case reflect.Struct: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + } + p.WriteByte('{') + for i := 0; i < f.NumField(); i++ { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + if p.fmt.PlusV || p.fmt.SharpV { + if name := f.Type().Field(i).Name; name != "" { + p.WriteString(name) + p.WriteByte(':') + } + } + p.printValue(getField(f, i), verb, depth+1) + } + p.WriteByte('}') + case reflect.Interface: + value := f.Elem() + if !value.IsValid() { + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + p.WriteString(nilParenString) + } else { + p.WriteString(nilAngleString) + } + } else { + p.printValue(value, verb, depth+1) + } + case reflect.Array, reflect.Slice: + switch verb { + case 's', 'q', 'x', 'X': + // Handle byte and uint8 slices and arrays special for the above verbs. + t := f.Type() + if t.Elem().Kind() == reflect.Uint8 { + var bytes []byte + if f.Kind() == reflect.Slice { + bytes = f.Bytes() + } else if f.CanAddr() { + bytes = f.Slice(0, f.Len()).Bytes() + } else { + // We have an array, but we cannot Slice() a non-addressable array, + // so we build a slice by hand. This is a rare case but it would be nice + // if reflection could help a little more. + bytes = make([]byte, f.Len()) + for i := range bytes { + bytes[i] = byte(f.Index(i).Uint()) + } + } + p.fmtBytes(bytes, verb, t.String()) + return + } + } + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.Kind() == reflect.Slice && f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteString(commaSpaceString) + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte('}') + } else { + p.WriteByte('[') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteByte(' ') + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte(']') + } + case reflect.Ptr: + // pointer to array or slice or struct? 
ok at top level + // but not embedded (avoid loops) + if depth == 0 && f.Pointer() != 0 { + switch a := f.Elem(); a.Kind() { + case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map: + p.WriteByte('&') + p.printValue(a, verb, depth+1) + return + } + } + fallthrough + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + p.fmtPointer(f, verb) + default: + p.unknownType(f) + } +} + +func (p *printer) badArgNum(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(badIndexString) +} + +func (p *printer) missingArg(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(missingString) +} + +func (p *printer) doPrintf(fmt string) { + for p.fmt.Parser.SetFormat(fmt); p.fmt.Scan(); { + switch p.fmt.Status { + case format.StatusText: + p.WriteString(p.fmt.Text()) + case format.StatusSubstitution: + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadWidthSubstitution: + p.WriteString(badWidthString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadPrecSubstitution: + p.WriteString(badPrecString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusNoVerb: + p.WriteString(noVerbString) + case format.StatusBadArgNum: + p.badArgNum(p.fmt.Verb) + case format.StatusMissingArg: + p.missingArg(p.fmt.Verb) + default: + panic("unreachable") + } + } + + // Check for extra arguments, but only if there was at least one ordered + // argument. Note that this behavior is necessarily different from fmt: + // different variants of messages may opt to drop some or all of the + // arguments. + if !p.fmt.Reordered && p.fmt.ArgNum < len(p.fmt.Args) && p.fmt.ArgNum != 0 { + p.fmt.ClearFlags() + p.WriteString(extraString) + for i, arg := range p.fmt.Args[p.fmt.ArgNum:] { + if i > 0 { + p.WriteString(commaSpaceString) + } + if arg == nil { + p.WriteString(nilAngleString) + } else { + p.WriteString(reflect.TypeOf(arg).String()) + p.WriteString("=") + p.printArg(arg, 'v') + } + } + p.WriteByte(')') + } +} + +func (p *printer) doPrint(a []interface{}) { + prevString := false + for argNum, arg := range a { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + // Add a space between two non-string arguments. + if argNum > 0 && !isString && !prevString { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + prevString = isString + } +} + +// doPrintln is like doPrint but always adds a space between arguments +// and a newline after the last argument. +func (p *printer) doPrintln(a []interface{}) { + for argNum, arg := range a { + if argNum > 0 { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + } + p.WriteByte('\n') +} diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE index 6a66aea5..2a7cf70d 100644 --- a/vendor/golang.org/x/time/LICENSE +++ b/vendor/golang.org/x/time/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. 
nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f49..93a798ab 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go index 2b19c93e..94eaee84 100644 --- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -58,6 +58,11 @@ // where t is the lower-cased name of the first type listed. It can be overridden // with the -output flag. // +// Types can also be declared in tests, in which case type declarations in the +// non-test package or its test variant are preferred over types defined in the +// package with suffix "_test". +// The default output file for type declarations in tests is t_string_test.go with t picked as above. +// // The -linecomment flag tells stringer to generate the text of any line comment, trimmed // of leading spaces, instead of the constant name. For instance, if the constants above had a // Pill prefix, one could write @@ -65,6 +70,9 @@ // PillAspirin // Aspirin // // to suppress it in the output. + +//go:debug gotypesalias=0 + package main // import "golang.org/x/tools/cmd/stringer" import ( @@ -128,10 +136,6 @@ func main() { // Parse the package once. var dir string - g := Generator{ - trimPrefix: *trimprefix, - lineComment: *linecomment, - } // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). if len(args) == 1 && isDirectory(args[0]) { dir = args[0] @@ -142,33 +146,90 @@ func main() { dir = filepath.Dir(args[0]) } - g.parsePackage(args, tags) + // For each type, generate code in the first package where the type is declared. + // The order of packages is as follows: + // package x + // package x compiled for tests + // package x_test + // + // Each package pass could result in a separate generated file. + // These files must have the same package and test/not-test nature as the types + // from which they were generated. + // + // Types will be excluded when generated, to avoid repetitions. + pkgs := loadPackages(args, tags, *trimprefix, *linecomment, nil /* logf */) + sort.Slice(pkgs, func(i, j int) bool { + // Put x_test packages last. + iTest := strings.HasSuffix(pkgs[i].name, "_test") + jTest := strings.HasSuffix(pkgs[j].name, "_test") + if iTest != jTest { + return !iTest + } - // Print the header and package clause. - g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) - g.Printf("\n") - g.Printf("package %s", g.pkg.name) - g.Printf("\n") - g.Printf("import \"strconv\"\n") // Used by all methods. 
+ return len(pkgs[i].files) < len(pkgs[j].files) + }) + for _, pkg := range pkgs { + g := Generator{ + pkg: pkg, + } - // Run generate for each type. - for _, typeName := range types { - g.generate(typeName) + // Print the header and package clause. + g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) + g.Printf("\n") + g.Printf("package %s", g.pkg.name) + g.Printf("\n") + g.Printf("import \"strconv\"\n") // Used by all methods. + + // Run generate for types that can be found. Keep the rest for the remainingTypes iteration. + var foundTypes, remainingTypes []string + for _, typeName := range types { + values := findValues(typeName, pkg) + if len(values) > 0 { + g.generate(typeName, values) + foundTypes = append(foundTypes, typeName) + } else { + remainingTypes = append(remainingTypes, typeName) + } + } + if len(foundTypes) == 0 { + // This package didn't have any of the relevant types, skip writing a file. + continue + } + if len(remainingTypes) > 0 && output != nil && *output != "" { + log.Fatalf("cannot write to single file (-output=%q) when matching types are found in multiple packages", *output) + } + types = remainingTypes + + // Format the output. + src := g.format() + + // Write to file. + outputName := *output + if outputName == "" { + // Type names will be unique across packages since only the first + // match is picked. + // So there won't be collisions between a package compiled for tests + // and the separate package of tests (package foo_test). + outputName = filepath.Join(dir, baseName(pkg, foundTypes[0])) + } + err := os.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } } - // Format the output. - src := g.format() - - // Write to file. - outputName := *output - if outputName == "" { - baseName := fmt.Sprintf("%s_string.go", types[0]) - outputName = filepath.Join(dir, strings.ToLower(baseName)) + if len(types) > 0 { + log.Fatalf("no values defined for types: %s", strings.Join(types, ",")) } - err := os.WriteFile(outputName, src, 0644) - if err != nil { - log.Fatalf("writing output: %s", err) +} + +// baseName that will put the generated code together with pkg. +func baseName(pkg *Package, typename string) string { + suffix := "string.go" + if pkg.hasTestFiles { + suffix = "string_test.go" } + return fmt.Sprintf("%s_%s", strings.ToLower(typename), suffix) } // isDirectory reports whether the named file is a directory. @@ -186,9 +247,6 @@ type Generator struct { buf bytes.Buffer // Accumulated output. pkg *Package // Package we are scanning. - trimPrefix string - lineComment bool - logf func(format string, args ...interface{}) // test logging hook; nil when not testing } @@ -209,54 +267,74 @@ type File struct { } type Package struct { - name string - defs map[*ast.Ident]types.Object - files []*File + name string + defs map[*ast.Ident]types.Object + files []*File + hasTestFiles bool } -// parsePackage analyzes the single package constructed from the patterns and tags. -// parsePackage exits if there is an error. -func (g *Generator) parsePackage(patterns []string, tags []string) { +// loadPackages analyzes the single package constructed from the patterns and tags. +// loadPackages exits if there is an error. +// +// Returns all variants (such as tests) of the package. +// +// logf is a test logging hook. It can be nil when not testing. 
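+//
+// A typical call, mirroring the one in main above (illustrative):
+//
+//	pkgs := loadPackages(args, tags, *trimprefix, *linecomment, nil /* logf */)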
+func loadPackages(
+	patterns, tags []string,
+	trimPrefix string, lineComment bool,
+	logf func(format string, args ...interface{}),
+) []*Package {
 	cfg := &packages.Config{
-		Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax,
-		// TODO: Need to think about constants in test files. Maybe write type_string_test.go
-		// in a separate pass? For later.
-		Tests:      false,
+		Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedFiles,
+		// Tests are included, let the caller decide how to fold them in.
+		Tests:      true,
 		BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))},
-		Logf:       g.logf,
+		Logf:       logf,
 	}
 	pkgs, err := packages.Load(cfg, patterns...)
 	if err != nil {
 		log.Fatal(err)
 	}
-	if len(pkgs) != 1 {
-		log.Fatalf("error: %d packages matching %v", len(pkgs), strings.Join(patterns, " "))
+	if len(pkgs) == 0 {
+		log.Fatalf("error: no packages matching %v", strings.Join(patterns, " "))
 	}
-	g.addPackage(pkgs[0])
-}
 
-// addPackage adds a type checked Package and its syntax files to the generator.
-func (g *Generator) addPackage(pkg *packages.Package) {
-	g.pkg = &Package{
-		name:  pkg.Name,
-		defs:  pkg.TypesInfo.Defs,
-		files: make([]*File, len(pkg.Syntax)),
-	}
+	out := make([]*Package, len(pkgs))
+	for i, pkg := range pkgs {
+		p := &Package{
+			name:  pkg.Name,
+			defs:  pkg.TypesInfo.Defs,
+			files: make([]*File, len(pkg.Syntax)),
+		}
+
+		for j, file := range pkg.Syntax {
+			p.files[j] = &File{
+				file: file,
+				pkg:  p,
 
-	for i, file := range pkg.Syntax {
-		g.pkg.files[i] = &File{
-			file:        file,
-			pkg:         g.pkg,
-			trimPrefix:  g.trimPrefix,
-			lineComment: g.lineComment,
+				trimPrefix:  trimPrefix,
+				lineComment: lineComment,
+			}
 		}
+
+		// Keep track of test files, since we might want to generate
+		// code that ends up in that kind of package.
+		// Can be replaced once https://go.dev/issue/38445 lands.
+		for _, f := range pkg.GoFiles {
+			if strings.HasSuffix(f, "_test.go") {
+				p.hasTestFiles = true
+				break
+			}
+		}
+
+		out[i] = p
 	}
+	return out
 }
 
-// generate produces the String method for the named type.
-func (g *Generator) generate(typeName string) {
+func findValues(typeName string, pkg *Package) []Value {
 	values := make([]Value, 0, 100)
-	for _, file := range g.pkg.files {
+	for _, file := range pkg.files {
 		// Set the state for this run of the walker.
 		file.typeName = typeName
 		file.values = nil
@@ -265,10 +343,11 @@ func (g *Generator) generate(typeName string) {
 			values = append(values, file.values...)
 		}
 	}
+	return values
+}
 
-	if len(values) == 0 {
-		log.Fatalf("no values defined for type %s", typeName)
-	}
+// generate produces the String method for the named type.
+func (g *Generator) generate(typeName string, values []Value) {
 	// Generate code that will fail if the constants change value.
 	g.Printf("func _() {\n")
 	g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n")
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
index 1fc1de0b..0e0ba4c0 100644
--- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
@@ -73,6 +73,15 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
 	// check, Preorder is almost twice as fast as Nodes. The two
 	// features seem to contribute similar slowdowns (~1.4x each).
+ // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 00000000..b7e95911 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. + + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3531ac8f..f1931d10 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. +causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. 
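The iterator API introduced in iter.go above composes with Go 1.23 range-over-func loops. A minimal usage sketch (the parsed source string is invented for illustration):

```go
//go:build go1.23

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package p; func f() { g(); h() }", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})

	// All[*ast.CallExpr] yields every call expression in preorder.
	// Unlike Preorder, the traversal can be abandoned early simply
	// by breaking out of the loop.
	for call := range inspector.All[*ast.CallExpr](in) {
		fmt.Println(call.Fun) // prints g, then stops
		break
	}
}
```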
Most tools should pass their command-line arguments (after any flags)
-uninterpreted to [Load], so that it can interpret them
+uninterpreted to Load, so that it can interpret them
according to the conventions of the underlying build system.

See the Example function for typical usage.

# The driver protocol

-[Load] may be used to load Go packages even in Go projects that use
+Load may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
@@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)
+
+The value of the PWD environment variable seen by the driver process
+is the preferred name of its working directory. (The working directory
+may have other aliases due to symbolic links; see the comment on the
+Dir field of [exec.Cmd] for related information.)
+When the driver process emits in its response the name of a file
+that is a descendant of this directory, it must use an absolute path
+that has the value of PWD as a prefix, to ensure that the returned
+filenames satisfy the original query.
*/
package packages // import "golang.org/x/tools/go/packages"
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index c2b4b711..8f7afcb5 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -82,7 +82,7 @@ type DriverResponse struct {

type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)

// findExternalDriver returns the file path of a tool that supplies
-// the build system package structure, or "" if not found."
+// the build system package structure, or "" if not found.
// If GOPACKAGESDRIVER is set in the environment findExternalDriver returns its
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
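To make the driver protocol described above concrete, here is a minimal sketch of a GOPACKAGESDRIVER program. It declines every query by setting NotHandled, which makes go/packages fall back to the standard "go list" driver; a real driver would populate Roots and Packages instead. DriverRequest and DriverResponse are the exported types referenced above; everything else is illustrative.

```go
package main

import (
	"encoding/json"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	// The query patterns arrive as command-line arguments; the
	// DriverRequest (mode, env, build flags, ...) arrives on stdin.
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatalf("decoding DriverRequest: %v", err)
	}

	// NotHandled defers this query to the default driver.
	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		log.Fatalf("encoding DriverResponse: %v", err)
	}
}
```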
func findExternalDriver(cfg *Config) driver { diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21..5fcad6ea 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,46 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + } + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - if m != 0 { - out = append(out, "Unknown") + if len(out) == 1 { + return out[0] } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0b6bfaff..f227f1ba 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -46,10 +46,10 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( @@ -103,25 +103,37 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + + // Be sure to update loadmode_string.go when adding new items! ) const ( + // LoadFiles loads the name and file names for the initial packages. + // // Deprecated: LoadFiles exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + // LoadImports loads the name, file names, and import mapping for the initial packages. + // // Deprecated: LoadImports exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. 
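Before the remaining deprecated mode aliases below, a quick illustration of the rewritten String method above; the expected outputs in the comments follow directly from the new table-driven code (named bits joined with "|", any unnamed residue printed in hex):

```go
package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	fmt.Println(packages.NeedName | packages.NeedFiles) // (NeedName|NeedFiles)
	fmt.Println(packages.NeedTypes)                     // NeedTypes
	fmt.Println(packages.LoadMode(0))                   // LoadMode(0)
}
```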
LoadImports = LoadFiles | NeedImports + // LoadTypes loads exported type information for the initial packages. + // // Deprecated: LoadTypes exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + // LoadSyntax loads typed syntax for the initial packages. + // // Deprecated: LoadSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + // // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps @@ -236,14 +248,13 @@ type Config struct { // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. // // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// NeedName | NeedFiles | NeedCompiledGoFiles. +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -260,7 +271,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -763,6 +774,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. :( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 9ada1777..a70b727f 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -228,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. 
return "", fmt.Errorf("no path for %v", obj) @@ -280,7 +280,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - if alias, ok := T.(*aliases.Alias); ok { + if alias, ok := T.(*types.Alias); ok { if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { return Path(r), nil } @@ -320,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -449,8 +449,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return find(obj, types.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. @@ -626,7 +626,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -664,7 +664,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = named.Underlying() case opRhs: - if alias, ok := t.(*aliases.Alias); ok { + if alias, ok := t.(*types.Alias); ok { t = aliases.Rhs(alias) } else if false && aliases.Enabled() { // The Enabled check is too expensive, so for now we diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 00000000..75438035 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := ast.Unparen(call.Fun) + + // Look through type instantiation if necessary. + isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. + // Example: Don't match a slice m within the expression `m[0]()`. + isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. 
+ if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 00000000..b81ce0c3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 00000000..8d824f71 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,517 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to any values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary any values. The concrete types that implement +// the Type interface are pointers. Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. +type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. 
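Stepping back to callee.go above: Callee and StaticCallee need a type-checked types.Info to consult. A minimal sketch (the package source and names are invented for illustration):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p
func f() {}
func g() { f() }`

	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Callee consults Uses (and Selections, for method calls).
	info := &types.Info{
		Uses:       make(map[*ast.Ident]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}

	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			// The call f() inside g resolves statically to func f.
			fmt.Println(typeutil.StaticCallee(info, call).Name()) // f
		}
		return true
	})
}
```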
This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. +// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. 
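A usage sketch of the Map documented above (illustration only): two structurally identical types that are not pointer-identical still land on the same key, which is the point of hashing via types.Identical.

```go
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is ready to use

	// Two distinct []int values: not == as pointers,
	// but identical as types.
	k1 := types.NewSlice(types.Typ[types.Int])
	k2 := types.NewSlice(types.Typ[types.Int])

	m.Set(k1, "a slice of int")
	fmt.Println(m.At(k2)) // a slice of int
	fmt.Println(m.Len())  // 1
}
```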
+func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 + + // ptrMap records pointer identity. + ptrMap map[any]uint32 + + // sigTParams holds type parameters from the signature being hashed. + // Signatures are considered identical modulo renaming of type parameters, so + // within the scope of a signature type the identity of the signature's type + // parameters is just their index. + // + // Since the language does not currently support referring to uninstantiated + // generic types or functions, and instantiated signatures do not have type + // parameter lists, we should never encounter a second non-empty type + // parameter list when hashing a generic signature. + sigTParams *types.TypeParamList +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{ + memo: make(map[types.Type]uint32), + ptrMap: make(map[any]uint32), + sigTParams: nil, + } +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. + switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.Hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + // Use a separate hasher for types inside of the signature, where type + // parameter identity is modified to be (index, constraint). 
We must use a + // new memo for this hasher as type identity may be affected by this + // masking. For example, in func[T any](*T), the identity of *T depends on + // whether we are mapping the argument in isolation, or recursively as part + // of hashing the signature. + // + // We should never encounter a generic signature while hashing another + // generic signature, but defensively set sigTParams only if h.mask is + // unset. + tparams := t.TypeParams() + if h.sigTParams == nil && tparams.Len() != 0 { + h = Hasher{ + // There may be something more efficient than discarding the existing + // memo, but it would require detecting whether types are 'tainted' by + // references to type parameters. + memo: make(map[types.Type]uint32), + // Re-using ptrMap ensures that pointer identity is preserved in this + // hasher. + ptrMap: h.ptrMap, + sigTParams: tparams, + } + } + + for i := 0; i < tparams.Len(); i++ { + tparam := tparams.At(i) + hash += 7 * h.Hash(tparam.Constraint()) + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + hash := h.hashPtr(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.Hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} + +func (h Hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. + terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h Hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.Hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns a hash of the type parameter t, with a hash value +// depending on whether t is contained in h.sigTParams. +// +// If h.sigTParams is set and contains t, then we are in the process of hashing +// a signature, and the hash value of t must depend only on t's index and +// constraint: signatures are considered identical modulo type parameter +// renaming. 
To avoid infinite recursion, we only hash the type parameter +// index, and rely on types.Identical to handle signatures where constraints +// are not identical. +// +// Otherwise the hash of t depends only on t's pointer identity. +func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { + if h.sigTParams != nil { + i := t.Index() + if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { + return 9173 + 3*uint32(i) + } + } + return h.hashPtr(t.Obj()) +} + +// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that +// pointers values are not dependent on the GC. +func (h Hasher) hashPtr(ptr any) uint32 { + if hash, ok := h.ptrMap[ptr]; ok { + return hash + } + hash := uint32(reflect.ValueOf(ptr).Pointer()) + h.ptrMap[ptr] = hash + return hash +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h Hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Alias: + return h.shallowHash(types.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. + return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashPtr(t.Obj()) + + case *types.TypeParam: + return h.hashPtr(t.Obj()) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 00000000..f7666028 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. 
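A short sketch of the cache in use, ahead of its definition below. Per the comment above, the zero value works; and a nil cache simply degrades to types.NewMethodSet, as the MethodSet method documented below notes. The defined type here is constructed inline purely for illustration:

```go
package main

import (
	"fmt"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var cache typeutil.MethodSetCache // zero value is ready to use

	// A defined type T with underlying type int and no methods.
	obj := types.NewTypeName(token.NoPos, nil, "T", nil)
	T := types.NewNamed(obj, types.Typ[types.Int], nil)

	// Repeated queries for T and *T hit the per-Named cache.
	fmt.Println(cache.MethodSet(T).Len())                   // 0
	fmt.Println(cache.MethodSet(types.NewPointer(T)).Len()) // 0

	// A nil *MethodSetCache is also legal; it just doesn't cache.
	fmt.Println((*typeutil.MethodSetCache)(nil).MethodSet(T).Len()) // 0
}
```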
+type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 00000000..9dda6a25 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. 
+ pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee..b9425f5a 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index 6652f7db..00000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. -type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } -func TypeParams(alias *Alias) *types.TypeParamList { panic("unreachable") } -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { panic("unreachable") } -func TypeArgs(alias *Alias) *types.TypeList { panic("unreachable") } -func Origin(alias *Alias) *Alias { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index 3ef1afeb..7716a333 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,22 +11,19 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) } // TypeParams returns the type parameter list of the alias. -func TypeParams(alias *Alias) *types.TypeParamList { +func TypeParams(alias *types.Alias) *types.TypeParamList { if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { return alias.TypeParams() // go1.23+ } @@ -37,7 +31,7 @@ func TypeParams(alias *Alias) *types.TypeParamList { } // SetTypeParams sets the type parameters of the alias type. -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { if alias, ok := any(alias).(interface { SetTypeParams(tparams []*types.TypeParam) }); ok { @@ -48,7 +42,7 @@ func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { } // TypeArgs returns the type arguments used to instantiate the Alias type. -func TypeArgs(alias *Alias) *types.TypeList { +func TypeArgs(alias *types.Alias) *types.TypeList { if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { return alias.TypeArgs() // go1.23+ } @@ -57,26 +51,13 @@ func TypeArgs(alias *Alias) *types.TypeList { // Origin returns the generic Alias type of which alias is an instance. // If alias is not an instance of a generic alias, Origin returns alias. -func Origin(alias *Alias) *Alias { +func Origin(alias *types.Alias) *types.Alias { if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { return alias.Origin() // go1.23+ } return alias // not an instance of a generic alias (go1.22) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. -// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a -} - // Enabled reports whether [NewAlias] should create [types.Alias] types. // // This function is expensive! Call it sparingly. @@ -92,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. 
fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2..d79a605e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df9112..e6c5d51f 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -232,14 +232,19 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified: emitted by cmd/compile since go1.20. 
_, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f3..1e19fbed 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. +// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. 
+// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. package gcimporter @@ -24,7 +242,6 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -223,7 +440,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. 
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -507,13 +724,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +740,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -744,8 +974,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -854,7 +1090,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa036..21908a15 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. +// See iexport.go for the export data format. // This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
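The indexed export format documented in iexport.go above is built from two primitives: uvarint-encoded integers and length-prefixed strings. A standalone sketch of both using encoding/binary (illustration only; the real writers are the intWriter/exportWriter types in this package):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var buf []byte
	tmp := make([]byte, binary.MaxVarintLen64)

	// uvarint: a uint64 in variable-length encoding.
	n := binary.PutUvarint(tmp, 300)
	buf = append(buf, tmp[:n]...)

	// A string value: uvarint length followed by that many bytes,
	// as in the Strings section described above.
	s := "hello"
	n = binary.PutUvarint(tmp, uint64(len(s)))
	buf = append(buf, tmp[:n]...)
	buf = append(buf, s...)

	// Decode both back.
	v, sz := binary.Uvarint(buf)
	fmt.Println(v) // 300
	rest := buf[sz:]
	l, sz2 := binary.Uvarint(rest)
	fmt.Println(string(rest[sz2 : sz2+int(l)])) // hello
}
```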
@@ -53,6 +53,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -562,14 +563,14 @@ func (r *importReader) obj(name string) { pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + r.declare(obj) case constTag: typ, val := r.value() @@ -615,7 +616,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +646,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -852,7 +853,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +863,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +960,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1052,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 00000000..907c8557 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. 
+var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624ca..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
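The new predeclared.go above replaces the deleted additionalPredeclared helper with a cache keyed by the identity of the universe's "any" type, because that identity varies with the gotypesalias GODEBUG setting. A stripped-down sketch of the same pattern with a shortened type list (this is not the vendored code itself):

    package main

    import (
        "fmt"
        "go/types"
        "sync"
    )

    var (
        mu    sync.Mutex
        cache map[types.Type][]types.Type
    )

    // basics mirrors the shape of the vendored predeclared(): one
    // cached slice per distinct identity of "any".
    func basics() []types.Type {
        anyt := types.Universe.Lookup("any").Type()

        mu.Lock()
        defer mu.Unlock()
        if got, ok := cache[anyt]; ok {
            return got
        }
        if cache == nil {
            cache = make(map[types.Type][]types.Type)
        }
        decls := []types.Type{
            types.Typ[types.Bool],
            types.Typ[types.String],
            types.Universe.Lookup("error").Type(),
            anyt,
        }
        cache[anyt] = decls
        return decls
    }

    func main() {
        fmt.Println(len(basics())) // 4
    }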
- -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c077068..1db40861 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. @@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +168,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -471,7 +474,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) @@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) @@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 2e59ff85..e333efc8 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -16,7 +16,6 @@ import ( "os" "os/exec" "path/filepath" - "reflect" "regexp" "runtime" "strconv" @@ -250,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second // The cwd gets resolved to the real path. On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index b92e8e6e..f6cb37c5 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -21,7 +21,7 @@ import ( // export data. type PkgDecoder struct { // version is the file format version. - version uint32 + version Version // sync indicates whether the file uses sync markers. sync bool @@ -68,8 +68,6 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. 
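splitVargenSuffix, deleted from support_go118.go earlier in this patch, reappears unchanged at the bottom of ureader_yes.go. It peels a trailing vargen marker (a middle dot followed by digits, which gc appends to disambiguate local type names) off a symbol name. A runnable harness around a verbatim copy of the function (only main is invented for the demo):

    package main

    import "fmt"

    // splitVargenSuffix splits name into a base and a trailing
    // "·N" suffix, if present.
    func splitVargenSuffix(name string) (base, suffix string) {
        i := len(name)
        for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' {
            i--
        }
        const dot = "·"
        if i >= len(dot) && name[i-len(dot):i] == dot {
            i -= len(dot)
            return name[:i], name[i:]
        }
        return name, ""
    }

    func main() {
        fmt.Println(splitVargenSuffix("T·3")) // T ·3
        fmt.Println(splitVargenSuffix("T3"))  // T3  (digits alone are not a suffix)
    }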
pkgPath is the package path for the // compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, @@ -80,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 @@ -102,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { assert(err == nil) pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } @@ -136,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } @@ -193,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { Idx: idx, } - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - + r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { @@ -244,7 +243,7 @@ type Decoder struct { func (r *Decoder) checkErr(err error) { if err != nil { - errorf("unexpected decoding error: %w", err) + panicf("unexpected decoding error: %w", err) } } @@ -515,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { return path, name, tag } + +// Version reports the version of the bitstream. +func (w *Decoder) Version() Version { return w.common.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index 6482617a..c17a1239 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -12,18 +12,15 @@ import ( "io" "math/big" "runtime" + "strings" ) -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - // A PkgEncoder provides methods for encoding a package's Unified IR // export data. type PkgEncoder struct { + // version of the bitstream. + version Version + // elems holds the bitstream for previously encoded elements. elems [numRelocs][]string @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } // export data files, but can help diagnosing desync errors in // higher-level Unified IR reader/writer code. If syncFrames is // negative, then sync markers are omitted entirely. 
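NewPkgDecoder now decodes the leading uint32 into a temporary before converting to the new Version type, rejects versions beyond numVersions-1 with a descriptive panic, and reads the flags word only when the version actually has the Flags field. The header framing itself is plain little-endian binary; a self-contained sketch of that read path (illustrative, not the vendored code):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    func main() {
        // Write a header: a version word, then (for V1 and later) a
        // flags word whose low bit enables sync markers.
        var buf bytes.Buffer
        binary.Write(&buf, binary.LittleEndian, uint32(2)) // version: V2
        binary.Write(&buf, binary.LittleEndian, uint32(1)) // flags: sync markers

        var ver uint32
        if err := binary.Read(&buf, binary.LittleEndian, &ver); err != nil {
            panic(err)
        }
        sync := false
        if ver >= 1 { // stands in for pr.version.Has(Flags)
            var flags uint32
            if err := binary.Read(&buf, binary.LittleEndian, &flags); err != nil {
                panic(err)
            }
            sync = flags&1 != 0
        }
        fmt.Println(ver, sync) // 2 true
    }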
-func NewPkgEncoder(syncFrames int) PkgEncoder { +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { return PkgEncoder{ + version: version, stringsIdx: make(map[string]Index), syncFrames: syncFrames, } @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(currentVersion) + writeUint32(uint32(pw.version)) - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) } - writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -159,7 +159,7 @@ type Encoder struct { // Flush finalizes the element's bitstream and returns its Index. func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + var sb strings.Builder // Backup the data so we write the relocations at the front. var tmp bytes.Buffer @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index { func (w *Encoder) checkErr(err error) { if err != nil { - errorf("unexpected encoding error: %v", err) + panicf("unexpected encoding error: %v", err) } } @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) { func (w *Encoder) scalar(val constant.Value) { switch v := constant.Val(val).(type) { default: - errorf("unhandled %v (%v)", val, val.Kind()) + panicf("unhandled %v (%v)", val, val.Kind()) case bool: w.Code(ValBool) w.Bool(v) @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) { b := v.Append(nil, 'p', -1) w.String(string(b)) // TODO: More efficient encoding. } + +// Version reports the version of the bitstream. +func (w *Encoder) Version() Version { return w.p.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a6..00000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7a..00000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
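The new Encoder.StringRef splits string writing into two steps so that callers which already hold a string's index can emit a reference without re-interning. The pattern underneath PkgEncoder.StringIdx is ordinary string interning; a minimal sketch with invented names:

    package main

    import "fmt"

    // interner stores each distinct string once and hands out stable
    // indices, like the encoder's string section.
    type interner struct {
        strs []string
        idx  map[string]int
    }

    func (in *interner) Index(s string) int {
        if i, ok := in.idx[s]; ok {
            return i
        }
        if in.idx == nil {
            in.idx = make(map[string]int)
        }
        in.idx[s] = len(in.strs)
        in.strs = append(in.strs, s)
        return in.idx[s]
    }

    func main() {
        var in interner
        fmt.Println(in.Index("pkg"), in.Index("obj"), in.Index("pkg")) // 0 1 0
    }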
- -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go index ad26d3b2..50534a29 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -12,6 +12,6 @@ func assert(b bool) { } } -func errorf(format string, args ...interface{}) { +func panicf(format string, args ...any) { panic(fmt.Errorf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go index 5bd51ef7..1520b73a 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -6,6 +6,7 @@ package pkgbits import ( "fmt" + "runtime" "strings" ) @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string { type frameVisitor func(file string, line int, name string, offset uintptr) +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + // SyncMarker is an enum type that represents markers that may be // written to export data to ensure the reader and writer stay // synchronized. 
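With the pre-go1.7 fallback deleted, walkFrames now has a single definition in sync.go built on runtime.CallersFrames. A runnable reminder of that API's shape, independent of pkgbits:

    package main

    import (
        "fmt"
        "runtime"
    )

    func main() {
        pcs := make([]uintptr, 8)
        n := runtime.Callers(1, pcs) // skip runtime.Callers itself
        frames := runtime.CallersFrames(pcs[:n])
        for {
            f, more := frames.Next()
            fmt.Printf("%s:%d %s +0x%x\n", f.File, f.Line, f.Function, f.PC-f.Entry)
            if !more {
                return
            }
        }
    }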
@@ -110,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go index 4a5b0ca5..582ad56d 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -74,11 +74,14 @@ func _() { _ = x[SyncStmtsEnd-64] _ = x[SyncLabel-65] _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} func (i SyncMarker) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 00000000..53af9df2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. +// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. 
+	// - remove the legacy "has init" bool from the public root
+	// - remove obj's "derived func instance" bool
+	// - add a TypeParamNames field to ObjAlias
+	// - remove derived info "needed" bool
+	V2
+
+	numVersions = iota
+)
+
+// Field denotes a unit of data in the serialized unified IR bitstream.
+// It is conceptually like a field in a structure.
+//
+// We only really need Fields when the data may or may not be present
+// in a stream based on the Version of the bitstream.
+//
+// Unlike much of pkgbits, Fields are not serialized and
+// can change values as needed.
+type Field int
+
+const (
+	// Flags in a uint32 in the header of a bitstream
+	// that is used to indicate whether optional features are enabled.
+	Flags Field = iota
+
+	// Deprecated: HasInit was a bool indicating whether a package
+	// has any init functions.
+	HasInit
+
+	// Deprecated: DerivedFuncInstance was a bool indicating
+	// whether an object was a function instance.
+	DerivedFuncInstance
+
+	// ObjAlias has a list of TypeParamNames.
+	AliasTypeParamNames
+
+	// Deprecated: DerivedInfoNeeded was a bool indicating
+	// whether a type was a derived type.
+	DerivedInfoNeeded
+
+	numFields = iota
+)
+
+// introduced is the version a field was added in.
+var introduced = [numFields]Version{
+	Flags:               V1,
+	AliasTypeParamNames: V2,
+}
+
+// removed is the version a field was removed in or 0 for fields
+// that have not yet been deprecated.
+// (So removed[f]-1 is the last version it is included in.)
+var removed = [numFields]Version{
+	HasInit:             V2,
+	DerivedFuncInstance: V2,
+	DerivedInfoNeeded:   V2,
+}
+
+// Has reports whether field f is present in a bitstream at version v.
+func (v Version) Has(f Field) bool {
+	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index a928acf2..cdaac9ab 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{
 		{"ParseSessionState", Func, 21},
 		{"QUICClient", Func, 21},
 		{"QUICConfig", Type, 21},
-		{"QUICConfig.EnableStoreSessionEvent", Field, 23},
+		{"QUICConfig.EnableSessionEvents", Field, 23},
 		{"QUICConfig.TLSConfig", Field, 21},
 		{"QUICConn", Type, 21},
 		{"QUICEncryptionLevel", Type, 21},
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
deleted file mode 100644
index ff9437a3..00000000
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// package tokeninternal provides access to some internal features of the token
-// package.
-package tokeninternal
-
-import (
-	"fmt"
-	"go/token"
-	"sort"
-	"sync"
-	"unsafe"
-)
-
-// GetLines returns the table of line-start offsets from a token.File.
-func GetLines(file *token.File) []int {
-	// token.File has a Lines method on Go 1.21 and later.
-	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
-		return file.Lines()
-	}
-
-	// This declaration must match that of token.File.
-	// This creates a risk of dependency skew.
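The introduced/removed tables above give every optional field a half-open version interval, and Has simply tests membership. A self-contained copy of the scheme with a trimmed field list; the expected outputs follow directly from the tables in version.go:

    package main

    import "fmt"

    type Version uint32
    type Field int

    const (
        V0 Version = iota
        V1
        V2
    )

    const (
        Flags Field = iota
        HasInit
        AliasTypeParamNames
        numFields = iota
    )

    var introduced = [numFields]Version{Flags: V1, AliasTypeParamNames: V2}
    var removed = [numFields]Version{HasInit: V2}

    // Has mirrors pkgbits: present from the introduction version up
    // to, but excluding, the removal version; removed == V0 means
    // "never removed" (the zero value doubles as a sentinel).
    func (v Version) Has(f Field) bool {
        return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
    }

    func main() {
        fmt.Println(V1.Has(HasInit))             // true: present until V2
        fmt.Println(V2.Has(HasInit))             // false: removed in V2
        fmt.Println(V2.Has(AliasTypeParamNames)) // true: added in V2
    }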
- // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 00000000..0b84acc5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,140 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
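The unsafe GetLines workaround deleted above became unnecessary once token.File gained an exported Lines method in Go 1.21, which is exactly what the export writer now calls (lines := file.Lines() in iexport.go). For example:

    package main

    import (
        "fmt"
        "go/token"
    )

    func main() {
        fset := token.NewFileSet()
        f := fset.AddFile("x.go", -1, 9)
        f.SetLinesForContent([]byte("a\nbb\nccc\n"))
        fmt.Println(f.Lines()) // [0 2 5]: byte offset of each line start
    }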
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *ast.IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter (or an alias of one). +func IsTypeParam(t types.Type) bool { + _, ok := types.Unalias(t).(*types.TypeParam) + return ok +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantiation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { + V = types.Unalias(V) + T = types.Unalias(T) + + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. 
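UnpackIndexExpr, defined above, smooths over the two AST shapes an index or instantiation expression can take: a single index parses as *ast.IndexExpr, two or more (a multi-parameter instantiation, go1.18+) as *ast.IndexListExpr. A quick runnable check:

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
    )

    func main() {
        for _, expr := range []string{"m[K]", "m[K, V]"} {
            e, err := parser.ParseExpr(expr)
            if err != nil {
                panic(err)
            }
            switch e := e.(type) {
            case *ast.IndexExpr:
                fmt.Println(expr, "-> IndexExpr, 1 index")
            case *ast.IndexListExpr:
                fmt.Println(expr, "-> IndexListExpr,", len(e.Indices), "indices")
            }
        }
    }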
+ + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. + if ctxt == nil { + ctxt = types.NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := types.Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := types.Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 00000000..6e83c6fb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,150 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "fmt" + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. 
+ ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := typ.Underlying().(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} + +// Deref returns the type of the variable pointed to by t, +// if t's core type is a pointer; otherwise it returns t. +// +// Do not assume that Deref(T)==T implies T is not a pointer: +// consider "type T *T", for example. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func Deref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. 
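CoreType implements the spec's core-type computation, including the channel case handled at the end of the function: an all-channel type set with identical element types and compatible directions still has a (directional) core type. Some illustrative constraints and the core types those rules assign them; these are ordinary declarations, not uses of this internal API:

    package example

    type Slice interface{ ~[]int }                // core type: []int
    type Chans interface{ chan int | <-chan int } // core type: <-chan int (direction rule)
    type NoCore interface{ int | string }         // no core type: underlying types differ

Deref and MustDeref build on this: they strip a pointer only when the core type is a pointer, so a type parameter constrained by ~*T dereferences like a plain *T.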
+func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go new file mode 100644 index 00000000..35810826 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -0,0 +1,118 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// Free is a memoization of the set of free type parameters within a +// type. It makes a sequence of calls to [Free.Has] for overlapping +// types more efficient. The zero value is ready for use. +// +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor. +type Free struct { + seen map[types.Type]bool +} + +// Has reports whether the specified type has a free type parameter. +func (w *Free) Has(typ types.Type) (res bool) { + // detect cycles + if x, ok := w.seen[typ]; ok { + return x + } + if w.seen == nil { + w.seen = make(map[types.Type]bool) + } + w.seen[typ] = false + defer func() { + w.seen[typ] = res + }() + + switch t := typ.(type) { + case nil, *types.Basic: // TODO(gri) should nil be handled here? + break + + case *types.Alias: + return w.Has(types.Unalias(t)) + + case *types.Array: + return w.Has(t.Elem()) + + case *types.Slice: + return w.Has(t.Elem()) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if w.Has(t.Field(i).Type()) { + return true + } + } + + case *types.Pointer: + return w.Has(t.Elem()) + + case *types.Tuple: + n := t.Len() + for i := 0; i < n; i++ { + if w.Has(t.At(i).Type()) { + return true + } + } + + case *types.Signature: + // t.tparams may not be nil if we are looking at a signature + // of a generic function type (or an interface method) that is + // part of the type we're testing. We don't care about these type + // parameters. + // Similarly, the receiver of a method may declare (rather than + // use) type parameters, we don't care about those either. + // Thus, we only need to look at the input and result parameters. + return w.Has(t.Params()) || w.Has(t.Results()) + + case *types.Interface: + for i, n := 0, t.NumMethods(); i < n; i++ { + if w.Has(t.Method(i).Type()) { + return true + } + } + terms, err := InterfaceTermSet(t) + if err != nil { + return false // ill typed + } + for _, term := range terms { + if w.Has(term.Type()) { + return true + } + } + + case *types.Map: + return w.Has(t.Key()) || w.Has(t.Elem()) + + case *types.Chan: + return w.Has(t.Elem()) + + case *types.Named: + args := t.TypeArgs() + // TODO(taking): this does not match go/types/infer.go. Check with rfindley. + if params := t.TypeParams(); params.Len() > args.Len() { + return true + } + for i, n := 0, args.Len(); i < n; i++ { + if w.Has(args.At(i)) { + return true + } + } + return w.Has(t.Underlying()) // recurse for types local to parameterized functions + + case *types.TypeParam: + return true + + default: + panic(t) // unreachable + } + + return false +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 00000000..93c80fdc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. 
+func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. 
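computeTermSetInternal above performs exactly the expansion and intersection walk that the StructuralTerms documentation describes. Its worked example, restated as compilable declarations:

    package example

    type A interface{ ~string | ~[]byte }
    type B interface{ int | string }
    type C interface{ ~string | ~int }

    // Per the StructuralTerms docs, the normalized term set of P is
    // ~string | int: A|B expands to ~string|~[]byte|int|string, which
    // reduces to ~string|~[]byte|int, and intersecting with C
    // (~string|~int) leaves ~string|int.
    type T[P interface{ A | B; C }] int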
+func under(t types.Type) types.Type {
+	return t.Underlying()
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
new file mode 100644
index 00000000..cbd12f80
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -0,0 +1,163 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import (
+	"bytes"
+	"go/types"
+)
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... ∪ tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+	if len(xl) == 0 {
+		return "∅"
+	}
+	var buf bytes.Buffer
+	for i, x := range xl {
+		if i > 0 {
+			buf.WriteString(" | ")
+		}
+		buf.WriteString(x.String())
+	}
+	return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+	// If there's a non-nil term, the entire list is not empty.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil {
+			return false
+		}
+	}
+	return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+	// If there's a 𝓤 term, the entire list is 𝓤.
+	// If the termlist is in normal form, this requires at most
+	// one iteration.
+	for _, x := range xl {
+		if x != nil && x.typ == nil {
+			return true
+		}
+	}
+	return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	used := make([]bool, len(xl))
+	var rl termlist
+	for i, xi := range xl {
+		if xi == nil || used[i] {
+			continue
+		}
+		for j := i + 1; j < len(xl); j++ {
+			xj := xl[j]
+			if xj == nil || used[j] {
+				continue
+			}
+			if u1, u2 := xi.union(xj); u2 == nil {
+				// If we encounter a 𝓤 term, the entire list is 𝓤.
+				// Exit early.
+				// (Note that this is not just an optimization;
+				// if we continue, we may end up with a 𝓤 term
+				// and other terms and the result would not be
+				// in normal form.)
+				if u1.typ == nil {
+					return allTermlist
+				}
+				xi = u1
+				used[j] = true // xj is now unioned into xi - ignore it in future iterations
+			}
+		}
+		rl = append(rl, xi)
+	}
+	return rl
+}
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+	return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+	if xl.isEmpty() || yl.isEmpty() {
+		return nil
+	}
+
+	// Quadratic algorithm, but good enough for now.
+	// TODO(gri) fix asymptotic performance
+	var rl termlist
+	for _, x := range xl {
+		for _, y := range yl {
+			if r := x.intersect(y); r != nil {
+				rl = append(rl, r)
+			}
+		}
+	}
+	return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+	// TODO(gri) this should be more efficient
+	return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t types.Type) bool {
+	for _, x := range xl {
+		if x.includes(t) {
+			return true
+		}
+	}
+	return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+	for _, x := range xl {
+		if y.subsetOf(x) {
+			return true
+		}
+	}
+	return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+	if yl.isEmpty() {
+		return xl.isEmpty()
+	}
+
+	// each term x of xl must be a subset of yl
+	for _, x := range xl {
+		if !yl.supersetOf(x) {
+			return false // x is not a subset of yl
+		}
+	}
+	return true
+}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
new file mode 100644
index 00000000..7350bb70
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by copytermlist.go DO NOT EDIT.
+
+package typeparams
+
+import "go/types"
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+type term struct {
+	tilde bool // valid if typ != nil
+	typ   types.Type
+}
+
+func (x *term) String() string {
+	switch {
+	case x == nil:
+		return "∅"
+	case x.typ == nil:
+		return "𝓤"
+	case x.tilde:
+		return "~" + x.typ.String()
+	default:
+		return x.typ.String()
+	}
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return x == y
+	case x.typ == nil || y.typ == nil:
+		return x.typ == y.typ
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+	// easy cases
+	switch {
+	case x == nil && y == nil:
+		return nil, nil // ∅ ∪ ∅ == ∅
+	case x == nil:
+		return y, nil // ∅ ∪ y == y
+	case y == nil:
+		return x, nil // x ∪ ∅ == x
+	case x.typ == nil:
+		return x, nil // 𝓤 ∪ y == 𝓤
+	case y.typ == nil:
+		return y, nil // x ∪ 𝓤 == 𝓤
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∪ ~t == ~t
+	// ~t ∪ T == ~t
+	// T ∪ ~t == ~t
+	// T ∪ T == T
+	if x.tilde || !y.tilde {
+		return x, nil
+	}
+	return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+	// easy cases
+	switch {
+	case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+	case x.typ == nil:
+		return y // 𝓤 ∩ y == y
+	case y.typ == nil:
+		return x // x ∩ 𝓤 == x
+	}
+	// ∅ ⊂ x, y ⊂ 𝓤
+
+	if x.disjoint(y) {
+		return nil // x ∩ y == ∅ if x ∩ y == ∅
+	}
+	// x.typ == y.typ
+
+	// ~t ∩ ~t == ~t
+	// ~t ∩ T == T
+	// T ∩ ~t == T
+	// T ∩ T == T
+	if !x.tilde || y.tilde {
+		return x
+	}
+	return y
+}
+
+// includes reports whether t ∈ x.
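The union/intersect/subsetOf methods implement a small algebra over (tilde, type) pairs. go/types exports the same term representation as types.Term, so the vocabulary can be tried directly; the commented results below restate the rule tables above rather than call the unexported operations:

    package main

    import (
        "fmt"
        "go/types"
    )

    func main() {
        tilde := types.NewTerm(true, types.Typ[types.Int])  // ~int
        exact := types.NewTerm(false, types.Typ[types.Int]) // int
        fmt.Println(tilde, exact) // ~int int

        // With x.typ == y.typ, the tables above give:
        //   ~t ∪ T == ~t    (union keeps the tilde term)
        //   ~t ∩ T == T     (intersection keeps the exact term)
        //   T ⊆ ~t == true, ~t ⊆ T == false
        fmt.Println(tilde.Tilde(), exact.Tilde()) // true false
    }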
+func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. +func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go new file mode 100644 index 00000000..4957f021 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. +func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). 
+ // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 834e0538..131caab2 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -838,7 +838,7 @@ const ( // InvalidCap occurs when an argument to the cap built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -859,7 +859,7 @@ const ( // InvalidCopy occurs when the arguments are not of slice type or do not // have compatible type. // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // See https://golang.org/ref/spec#Appending_and_copying_slices for more // information on the type requirements for the copy built-in. // // Example: @@ -897,7 +897,7 @@ const ( // InvalidLen occurs when an argument to the len built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -914,7 +914,7 @@ const ( // InvalidMake occurs when make is called with an unsupported type argument. 
// - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for // information on the types that may be created using make. // // Example: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index fea7c8b7..ba6f4f4e 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -6,8 +6,6 @@ package typesinternal import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // ReceiverNamed returns the named type (if any) associated with the @@ -15,11 +13,11 @@ import ( // It also reports whether a Pointer was present. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { isPtr = true t = ptr.Elem() } - named, _ = aliases.Unalias(t).(*types.Named) + named, _ = types.Unalias(t).(*types.Named) return } @@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { // indirection from the type, regardless of named types (analogous to // a LOAD instruction). func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { return ptr.Elem() } return t diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a5..00000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go deleted file mode 100644 index f65beed9..00000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.19 -// +build go1.19 - -package versions - -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go deleted file mode 100644 index 1a9efa12..00000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.20 -// +build go1.20 - -package versions - -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index 562eef21..f0bb0d15 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -5,15 +5,34 @@ package versions import ( + "go/ast" "go/types" ) -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() +// FileVersion returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v } - return "" + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future +} + +// InitFileVersions initializes info to record Go versions for Go files. +func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) } diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go deleted file mode 100644 index b4345d33..00000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a language version (<=1.21) derived from runtime.Version() -// or an unknown future version. -func FileVersion(info *types.Info, file *ast.File) string { - // In x/tools built with Go <= 1.21, we do not have Info.FileVersions - // available. We use a go version derived from the toolchain used to - // compile the tool by default. - // This will be <= go1.21. We take this as the maximum version that - // this tool can support. - // - // There are no features currently in x/tools that need to tell fine grained - // differences for versions <1.22. - return toolchain -} - -// InitFileVersions is a noop when compiled with this Go version. -func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go deleted file mode 100644 index aac5db62..00000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a file's Go version. -// The reported version is an unknown Future version if a -// version cannot be determined. -func FileVersion(info *types.Info, file *ast.File) string { - // In tools built with Go >= 1.22, the Go version of a file - // follow a cascades of sources: - // 1) types.Info.FileVersion, which follows the cascade: - // 1.a) file version (ast.File.GoVersion), - // 1.b) the package version (types.Config.GoVersion), or - // 2) is some unknown Future version. - // - // File versions require a valid package version to be provided to types - // in Config.GoVersion. Config.GoVersion is either from the package's module - // or the toolchain (go run). This value should be provided by go/packages - // or unitchecker.Config.GoVersion. - if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/google.golang.org/genproto/googleapis/api/LICENSE b/vendor/google.golang.org/genproto/googleapis/api/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go new file mode 100644 index 00000000..9f81dbcd --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go @@ -0,0 +1,1664 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/expr/v1alpha1/checked.proto + +package expr + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// CEL primitive types. +type Type_PrimitiveType int32 + +const ( + // Unspecified type. + Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0 + // Boolean type. + Type_BOOL Type_PrimitiveType = 1 + // Int64 type. + // + // Proto-based integer values are widened to int64. + Type_INT64 Type_PrimitiveType = 2 + // Uint64 type. + // + // Proto-based unsigned integer values are widened to uint64. + Type_UINT64 Type_PrimitiveType = 3 + // Double type. + // + // Proto-based float values are widened to double values. + Type_DOUBLE Type_PrimitiveType = 4 + // String type. + Type_STRING Type_PrimitiveType = 5 + // Bytes type. + Type_BYTES Type_PrimitiveType = 6 +) + +// Enum value maps for Type_PrimitiveType. +var ( + Type_PrimitiveType_name = map[int32]string{ + 0: "PRIMITIVE_TYPE_UNSPECIFIED", + 1: "BOOL", + 2: "INT64", + 3: "UINT64", + 4: "DOUBLE", + 5: "STRING", + 6: "BYTES", + } + Type_PrimitiveType_value = map[string]int32{ + "PRIMITIVE_TYPE_UNSPECIFIED": 0, + "BOOL": 1, + "INT64": 2, + "UINT64": 3, + "DOUBLE": 4, + "STRING": 5, + "BYTES": 6, + } +) + +func (x Type_PrimitiveType) Enum() *Type_PrimitiveType { + p := new(Type_PrimitiveType) + *p = x + return p +} + +func (x Type_PrimitiveType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_expr_v1alpha1_checked_proto_enumTypes[0].Descriptor() +} + +func (Type_PrimitiveType) Type() protoreflect.EnumType { + return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[0] +} + +func (x Type_PrimitiveType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_PrimitiveType.Descriptor instead. +func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0} +} + +// Well-known protobuf types treated with first-class support in CEL. +type Type_WellKnownType int32 + +const ( + // Unspecified type. + Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0 + // Well-known protobuf.Any type. + // + // Any types are a polymorphic message type. During type-checking they are + // treated like `DYN` types, but at runtime they are resolved to a specific + // message type specified at evaluation time. + Type_ANY Type_WellKnownType = 1 + // Well-known protobuf.Timestamp type, internally referenced as `timestamp`. + Type_TIMESTAMP Type_WellKnownType = 2 + // Well-known protobuf.Duration type, internally referenced as `duration`. + Type_DURATION Type_WellKnownType = 3 +) + +// Enum value maps for Type_WellKnownType. 
+var ( + Type_WellKnownType_name = map[int32]string{ + 0: "WELL_KNOWN_TYPE_UNSPECIFIED", + 1: "ANY", + 2: "TIMESTAMP", + 3: "DURATION", + } + Type_WellKnownType_value = map[string]int32{ + "WELL_KNOWN_TYPE_UNSPECIFIED": 0, + "ANY": 1, + "TIMESTAMP": 2, + "DURATION": 3, + } +) + +func (x Type_WellKnownType) Enum() *Type_WellKnownType { + p := new(Type_WellKnownType) + *p = x + return p +} + +func (x Type_WellKnownType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_expr_v1alpha1_checked_proto_enumTypes[1].Descriptor() +} + +func (Type_WellKnownType) Type() protoreflect.EnumType { + return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[1] +} + +func (x Type_WellKnownType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Type_WellKnownType.Descriptor instead. +func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1} +} + +// A CEL expression which has been successfully type checked. +type CheckedExpr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A map from expression ids to resolved references. + // + // The following entries are in this table: + // + // - An Ident or Select expression is represented here if it resolves to a + // declaration. For instance, if `a.b.c` is represented by + // `select(select(id(a), b), c)`, and `a.b` resolves to a declaration, + // while `c` is a field selection, then the reference is attached to the + // nested select expression (but not to the id or or the outer select). + // In turn, if `a` resolves to a declaration and `b.c` are field selections, + // the reference is attached to the ident expression. + // - Every Call expression has an entry here, identifying the function being + // called. + // - Every CreateStruct expression for a message has an entry, identifying + // the message. + ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // A map from expression ids to types. + // + // Every expression node which has a type different than DYN has a mapping + // here. If an expression has type DYN, it is omitted from this map to save + // space. + TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The source info derived from input that generated the parsed `expr` and + // any optimizations made during the type-checking pass. + SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"` + // The expr version indicates the major / minor version number of the `expr` + // representation. + // + // The most common reason for a version change will be to indicate to the CEL + // runtimes that transformations have been performed on the expr during static + // analysis. In some cases, this will save the runtime the work of applying + // the same or similar transformations prior to evaluation. + ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"` + // The checked expression. 
Semantically equivalent to the parsed `expr`, but + // may have structural differences. + Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"` +} + +func (x *CheckedExpr) Reset() { + *x = CheckedExpr{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckedExpr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckedExpr) ProtoMessage() {} + +func (x *CheckedExpr) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead. +func (*CheckedExpr) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{0} +} + +func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference { + if x != nil { + return x.ReferenceMap + } + return nil +} + +func (x *CheckedExpr) GetTypeMap() map[int64]*Type { + if x != nil { + return x.TypeMap + } + return nil +} + +func (x *CheckedExpr) GetSourceInfo() *SourceInfo { + if x != nil { + return x.SourceInfo + } + return nil +} + +func (x *CheckedExpr) GetExprVersion() string { + if x != nil { + return x.ExprVersion + } + return "" +} + +func (x *CheckedExpr) GetExpr() *Expr { + if x != nil { + return x.Expr + } + return nil +} + +// Represents a CEL type. +type Type struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of type. + // + // Types that are assignable to TypeKind: + // + // *Type_Dyn + // *Type_Null + // *Type_Primitive + // *Type_Wrapper + // *Type_WellKnown + // *Type_ListType_ + // *Type_MapType_ + // *Type_Function + // *Type_MessageType + // *Type_TypeParam + // *Type_Type + // *Type_Error + // *Type_AbstractType_ + TypeKind isType_TypeKind `protobuf_oneof:"type_kind"` +} + +func (x *Type) Reset() { + *x = Type{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type) ProtoMessage() {} + +func (x *Type) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type.ProtoReflect.Descriptor instead. 
+func (*Type) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+	if m != nil {
+		return m.TypeKind
+	}
+	return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+	if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+		return x.Dyn
+	}
+	return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+	if x, ok := x.GetTypeKind().(*Type_Null); ok {
+		return x.Null
+	}
+	return structpb.NullValue_NULL_VALUE
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+	if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+		return x.Primitive
+	}
+	return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+	if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+		return x.Wrapper
+	}
+	return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+	if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+		return x.WellKnown
+	}
+	return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+	if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+		return x.ListType
+	}
+	return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+	if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+		return x.MapType
+	}
+	return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+	if x, ok := x.GetTypeKind().(*Type_Function); ok {
+		return x.Function
+	}
+	return nil
+}
+
+func (x *Type) GetMessageType() string {
+	if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+		return x.MessageType
+	}
+	return ""
+}
+
+func (x *Type) GetTypeParam() string {
+	if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+		return x.TypeParam
+	}
+	return ""
+}
+
+func (x *Type) GetType() *Type {
+	if x, ok := x.GetTypeKind().(*Type_Type); ok {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+	if x, ok := x.GetTypeKind().(*Type_Error); ok {
+		return x.Error
+	}
+	return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+	if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+		return x.AbstractType
+	}
+	return nil
+}
+
+type isType_TypeKind interface {
+	isType_TypeKind()
+}
+
+type Type_Dyn struct {
+	// Dynamic type.
+	Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+	// Null value.
+	Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+	// Primitive types: `true`, `1u`, `-2.0`, `'string'`, `b'bytes'`.
+	Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+	// Wrapper of a primitive type, e.g. `google.protobuf.Int64Value`.
+	Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+	// Well-known protobuf type such as `google.protobuf.Timestamp`.
+	WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=google.api.expr.v1alpha1.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+	// Parameterized list with elements of `list_type`, e.g. `list<timestamp>`.
+	ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+	// Parameterized map with typed keys and values.
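+	// For example, `map<string, int>`.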
+	MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+	// Function type.
+	Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+	// Protocol buffer message type.
+	//
+	// The `message_type` string specifies the qualified message type name. For
+	// example, `google.plus.Profile`.
+	MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+	// Type param type.
+	//
+	// The `type_param` string specifies the type parameter name, e.g. `list<E>`
+	// would be a `list_type` whose element type was a `type_param` type
+	// named `E`.
+	TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+	// Type type.
+	//
+	// The `type` value specifies the target type. e.g. `int` is a type with a
+	// target type of `Primitive.INT`.
+	Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+	// Error type.
+	//
+	// During type-checking if an expression is an error, its type is propagated
+	// as the `ERROR` type. This permits the type-checker to discover other
+	// errors present in the expression.
+	Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+	// Abstract, application defined type.
+	AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+// Represents a declaration of a named value or function.
+//
+// A declaration is part of the contract between the expression, the agent
+// evaluating that expression, and the caller requesting evaluation.
+type Decl struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The fully qualified name of the declaration.
+	//
+	// Declarations are organized in containers and this represents the full path
+	// to the declaration in its container, as in `google.api.expr.Decl`.
+	//
+	// Declarations used as
+	// [FunctionDecl.Overload][google.api.expr.v1alpha1.Decl.FunctionDecl.Overload]
+	// parameters may or may not have a name depending on whether the overload is
+	// a function declaration or a function definition containing a result
+	// [Expr][google.api.expr.v1alpha1.Expr].
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Required. The declaration kind.
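+	// A declaration is either an identifier (a named value) or a function.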
+ // + // Types that are assignable to DeclKind: + // + // *Decl_Ident + // *Decl_Function + DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"` +} + +func (x *Decl) Reset() { + *x = Decl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl) ProtoMessage() {} + +func (x *Decl) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl.ProtoReflect.Descriptor instead. +func (*Decl) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2} +} + +func (x *Decl) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *Decl) GetDeclKind() isDecl_DeclKind { + if m != nil { + return m.DeclKind + } + return nil +} + +func (x *Decl) GetIdent() *Decl_IdentDecl { + if x, ok := x.GetDeclKind().(*Decl_Ident); ok { + return x.Ident + } + return nil +} + +func (x *Decl) GetFunction() *Decl_FunctionDecl { + if x, ok := x.GetDeclKind().(*Decl_Function); ok { + return x.Function + } + return nil +} + +type isDecl_DeclKind interface { + isDecl_DeclKind() +} + +type Decl_Ident struct { + // Identifier declaration. + Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"` +} + +type Decl_Function struct { + // Function declaration. + Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"` +} + +func (*Decl_Ident) isDecl_DeclKind() {} + +func (*Decl_Function) isDecl_DeclKind() {} + +// Describes a resolved reference to a declaration. +type Reference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the declaration. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // For references to functions, this is a list of `Overload.overload_id` + // values which match according to typing rules. + // + // If the list has more than one element, overload resolution among the + // presented candidates must happen at runtime because of dynamic types. The + // type checker attempts to narrow down this list as much as possible. + // + // Empty if this is not a reference to a + // [Decl.FunctionDecl][google.api.expr.v1alpha1.Decl.FunctionDecl]. + OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"` + // For references to constants, this may contain the value of the + // constant if known at compile time. 
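+	// (Illustrative: a reference to a proto enum value typically resolves
+	// here with its numeric value as an int64 constant.)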
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Reference) Reset() { + *x = Reference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reference) ProtoMessage() {} + +func (x *Reference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reference.ProtoReflect.Descriptor instead. +func (*Reference) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{3} +} + +func (x *Reference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Reference) GetOverloadId() []string { + if x != nil { + return x.OverloadId + } + return nil +} + +func (x *Reference) GetValue() *Constant { + if x != nil { + return x.Value + } + return nil +} + +// List type with typed elements, e.g. `list`. +type Type_ListType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The element type. + ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"` +} + +func (x *Type_ListType) Reset() { + *x = Type_ListType{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_ListType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_ListType) ProtoMessage() {} + +func (x *Type_ListType) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead. +func (*Type_ListType) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Type_ListType) GetElemType() *Type { + if x != nil { + return x.ElemType + } + return nil +} + +// Map type with parameterized key and value types, e.g. `map`. +type Type_MapType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the key. + KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"` + // The type of the value. 
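+	// For example, for `map<string, int>` this is the `INT64` primitive type.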
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"` +} + +func (x *Type_MapType) Reset() { + *x = Type_MapType{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_MapType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_MapType) ProtoMessage() {} + +func (x *Type_MapType) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead. +func (*Type_MapType) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1} +} + +func (x *Type_MapType) GetKeyType() *Type { + if x != nil { + return x.KeyType + } + return nil +} + +func (x *Type_MapType) GetValueType() *Type { + if x != nil { + return x.ValueType + } + return nil +} + +// Function type with result and arg types. +type Type_FunctionType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Result type of the function. + ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"` + // Argument types of the function. + ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"` +} + +func (x *Type_FunctionType) Reset() { + *x = Type_FunctionType{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_FunctionType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_FunctionType) ProtoMessage() {} + +func (x *Type_FunctionType) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead. +func (*Type_FunctionType) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 2} +} + +func (x *Type_FunctionType) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Type_FunctionType) GetArgTypes() []*Type { + if x != nil { + return x.ArgTypes + } + return nil +} + +// Application defined abstract type. +type Type_AbstractType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of this abstract type. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Parameter types for this abstract type. 
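+	// For example, an abstract type `vector<int>` would carry a single
+	// `INT64` parameter type here. (Illustrative; `vector` is not a
+	// built-in CEL type.)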
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"` +} + +func (x *Type_AbstractType) Reset() { + *x = Type_AbstractType{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Type_AbstractType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Type_AbstractType) ProtoMessage() {} + +func (x *Type_AbstractType) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead. +func (*Type_AbstractType) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Type_AbstractType) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Type_AbstractType) GetParameterTypes() []*Type { + if x != nil { + return x.ParameterTypes + } + return nil +} + +// Identifier declaration which specifies its type and optional `Expr` value. +// +// An identifier without a value is a declaration that must be provided at +// evaluation time. An identifier with a value should resolve to a constant, +// but may be used in conjunction with other identifiers bound at evaluation +// time. +type Decl_IdentDecl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The type of the identifier. + Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The constant value of the identifier. If not specified, the identifier + // must be supplied at evaluation time. + Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // Documentation string for the identifier. + Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"` +} + +func (x *Decl_IdentDecl) Reset() { + *x = Decl_IdentDecl{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Decl_IdentDecl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Decl_IdentDecl) ProtoMessage() {} + +func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead. 
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+	if x != nil {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+	if x != nil {
+		return x.Value
+	}
+	return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+	if x != nil {
+		return x.Doc
+	}
+	return ""
+}
+
+// Function declaration specifies one or more overloads which indicate the
+// function's parameter types and return type.
+//
+// Functions have no observable side-effects (there may be side-effects like
+// logging which are not observable from CEL).
+type Decl_FunctionDecl struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. List of function overloads, must contain at least one overload.
+	Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+	*x = Decl_FunctionDecl{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Decl_FunctionDecl) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+	if x != nil {
+		return x.Overloads
+	}
+	return nil
+}
+
+// An overload indicates a function's parameter types and return type, and
+// may optionally include a function body described in terms of
+// [Expr][google.api.expr.v1alpha1.Expr] values.
+//
+// Function overloads are declared in either a function or method
+// call-style. For methods, the `params[0]` is the expected type of the
+// target receiver.
+//
+// Overloads must have non-overlapping argument types after erasure of all
+// parameterized type variables (similar to type erasure in Java).
+type Decl_FunctionDecl_Overload struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Required. Globally unique overload name of the function which reflects
+	// the function name and argument types.
+	//
+	// This will be used by a [Reference][google.api.expr.v1alpha1.Reference]
+	// to indicate the `overload_id` that was resolved for the function
+	// `name`.
+	OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+	// List of function parameter [Type][google.api.expr.v1alpha1.Type]
+	// values.
+	//
+	// Param types are disjoint after generic type parameters have been
+	// replaced with the type `DYN`. Since the `DYN` type is compatible with
+	// any other type, this means that if `A` is a type parameter, the
+	// function types `int<A>` and `int<int>` are not disjoint. Likewise,
+	// `map<string, string>` is not disjoint from `map<K, V>`.
+	//
+	// When the `result_type` of a function is a generic type param, the
+	// type param name also appears as the `type` of at least one of the params.
+	Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+	// The type param names associated with the function declaration.
+	//
+	// For example, `function ex<K,V>(K key, map<K, V> map) : V` would yield
+	// the type params of `K, V`.
+	TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+	// Required. The result type of the function. For example, the operator
+	// `string.isEmpty()` would have `result_type` of `kind: BOOL`.
+	ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+	// Whether the function is to be used in a method call-style `x.f(...)`
+	// or a function call-style `f(x, ...)`.
+	//
+	// For methods, the first parameter declaration, `params[0]` is the
+	// expected type of the target receiver.
+	IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+	// Documentation string for the overload.
+	Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+	*x = Decl_FunctionDecl_Overload{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
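+
+// Editor's note (illustration only, not part of the generated file): a
+// minimal sketch of how a declaration for a hypothetical function-style
+// `size(string) -> int` overload could be assembled from the types above.
+// The name "size" and overload id "size_string" are assumptions for the
+// example:
+//
+//	decl := &Decl{
+//		Name: "size",
+//		DeclKind: &Decl_Function{Function: &Decl_FunctionDecl{
+//			Overloads: []*Decl_FunctionDecl_Overload{{
+//				OverloadId: "size_string",
+//				Params:     []*Type{{TypeKind: &Type_Primitive{Primitive: Type_STRING}}},
+//				ResultType: &Type{TypeKind: &Type_Primitive{Primitive: Type_INT64}},
+//			}},
+//		}},
+//	}
+//
+// A method call-style overload (e.g. `"foo".size()`) would additionally set
+// IsInstanceFunction to true, with `Params[0]` as the receiver type.
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.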
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1, 0} +} + +func (x *Decl_FunctionDecl_Overload) GetOverloadId() string { + if x != nil { + return x.OverloadId + } + return "" +} + +func (x *Decl_FunctionDecl_Overload) GetParams() []*Type { + if x != nil { + return x.Params + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string { + if x != nil { + return x.TypeParams + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetResultType() *Type { + if x != nil { + return x.ResultType + } + return nil +} + +func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool { + if x != nil { + return x.IsInstanceFunction + } + return false +} + +func (x *Decl_FunctionDecl_Overload) GetDoc() string { + if x != nil { + return x.Doc + } + return "" +} + +var File_google_api_expr_v1alpha1_checked_proto protoreflect.FileDescriptor + +var file_google_api_expr_v1alpha1_checked_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, + 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, + 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x04, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, + 0x45, 0x78, 0x70, 0x72, 0x12, 0x5c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, + 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, + 0x61, 0x70, 0x12, 0x4d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, + 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61, + 0x70, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 
0x6f, 0x52, 0x0a, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72, + 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a, + 0x64, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0xc8, 0x0b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x4c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, + 0x12, 0x4d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 
0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, + 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, + 0x46, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x49, 0x0a, 0x08, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x47, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x54, 
0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65, + 0x1a, 0x83, 0x01, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08, + 0x6b, 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, + 0x6b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x8c, 0x01, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x6b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, + 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, + 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, + 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, + 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, + 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 
0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, + 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, + 0x0b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xb3, 0x05, 0x0a, + 0x04, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, + 0x63, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x8b, 0x01, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x44, 0x65, 0x63, 0x6c, 0x12, 0x32, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x64, 0x6f, 0x63, 0x1a, 0xee, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x52, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, + 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, + 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0x89, 0x02, 0x0a, 0x08, 0x4f, 0x76, + 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, + 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x1f, 0x0a, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, + 0x12, 0x3f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x69, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x64, 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, + 0x6e, 0x64, 0x22, 0x7a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, + 0x61, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6c, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, + 0x44, 0x65, 0x63, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_expr_v1alpha1_checked_proto_rawDescOnce sync.Once + file_google_api_expr_v1alpha1_checked_proto_rawDescData = file_google_api_expr_v1alpha1_checked_proto_rawDesc +) + +func file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP() []byte { + file_google_api_expr_v1alpha1_checked_proto_rawDescOnce.Do(func() { + file_google_api_expr_v1alpha1_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_checked_proto_rawDescData) + }) + return file_google_api_expr_v1alpha1_checked_proto_rawDescData +} + +var file_google_api_expr_v1alpha1_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_expr_v1alpha1_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_google_api_expr_v1alpha1_checked_proto_goTypes = []interface{}{ + (Type_PrimitiveType)(0), // 0: google.api.expr.v1alpha1.Type.PrimitiveType + (Type_WellKnownType)(0), // 1: google.api.expr.v1alpha1.Type.WellKnownType + (*CheckedExpr)(nil), // 2: google.api.expr.v1alpha1.CheckedExpr + (*Type)(nil), // 3: google.api.expr.v1alpha1.Type + (*Decl)(nil), // 4: google.api.expr.v1alpha1.Decl + 
(*Reference)(nil), // 5: google.api.expr.v1alpha1.Reference + nil, // 6: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry + nil, // 7: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry + (*Type_ListType)(nil), // 8: google.api.expr.v1alpha1.Type.ListType + (*Type_MapType)(nil), // 9: google.api.expr.v1alpha1.Type.MapType + (*Type_FunctionType)(nil), // 10: google.api.expr.v1alpha1.Type.FunctionType + (*Type_AbstractType)(nil), // 11: google.api.expr.v1alpha1.Type.AbstractType + (*Decl_IdentDecl)(nil), // 12: google.api.expr.v1alpha1.Decl.IdentDecl + (*Decl_FunctionDecl)(nil), // 13: google.api.expr.v1alpha1.Decl.FunctionDecl + (*Decl_FunctionDecl_Overload)(nil), // 14: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload + (*SourceInfo)(nil), // 15: google.api.expr.v1alpha1.SourceInfo + (*Expr)(nil), // 16: google.api.expr.v1alpha1.Expr + (*emptypb.Empty)(nil), // 17: google.protobuf.Empty + (structpb.NullValue)(0), // 18: google.protobuf.NullValue + (*Constant)(nil), // 19: google.api.expr.v1alpha1.Constant +} +var file_google_api_expr_v1alpha1_checked_proto_depIdxs = []int32{ + 6, // 0: google.api.expr.v1alpha1.CheckedExpr.reference_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry + 7, // 1: google.api.expr.v1alpha1.CheckedExpr.type_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry + 15, // 2: google.api.expr.v1alpha1.CheckedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo + 16, // 3: google.api.expr.v1alpha1.CheckedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr + 17, // 4: google.api.expr.v1alpha1.Type.dyn:type_name -> google.protobuf.Empty + 18, // 5: google.api.expr.v1alpha1.Type.null:type_name -> google.protobuf.NullValue + 0, // 6: google.api.expr.v1alpha1.Type.primitive:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType + 0, // 7: google.api.expr.v1alpha1.Type.wrapper:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType + 1, // 8: google.api.expr.v1alpha1.Type.well_known:type_name -> google.api.expr.v1alpha1.Type.WellKnownType + 8, // 9: google.api.expr.v1alpha1.Type.list_type:type_name -> google.api.expr.v1alpha1.Type.ListType + 9, // 10: google.api.expr.v1alpha1.Type.map_type:type_name -> google.api.expr.v1alpha1.Type.MapType + 10, // 11: google.api.expr.v1alpha1.Type.function:type_name -> google.api.expr.v1alpha1.Type.FunctionType + 3, // 12: google.api.expr.v1alpha1.Type.type:type_name -> google.api.expr.v1alpha1.Type + 17, // 13: google.api.expr.v1alpha1.Type.error:type_name -> google.protobuf.Empty + 11, // 14: google.api.expr.v1alpha1.Type.abstract_type:type_name -> google.api.expr.v1alpha1.Type.AbstractType + 12, // 15: google.api.expr.v1alpha1.Decl.ident:type_name -> google.api.expr.v1alpha1.Decl.IdentDecl + 13, // 16: google.api.expr.v1alpha1.Decl.function:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl + 19, // 17: google.api.expr.v1alpha1.Reference.value:type_name -> google.api.expr.v1alpha1.Constant + 5, // 18: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry.value:type_name -> google.api.expr.v1alpha1.Reference + 3, // 19: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry.value:type_name -> google.api.expr.v1alpha1.Type + 3, // 20: google.api.expr.v1alpha1.Type.ListType.elem_type:type_name -> google.api.expr.v1alpha1.Type + 3, // 21: google.api.expr.v1alpha1.Type.MapType.key_type:type_name -> google.api.expr.v1alpha1.Type + 3, // 22: google.api.expr.v1alpha1.Type.MapType.value_type:type_name -> google.api.expr.v1alpha1.Type + 3, // 23: 
google.api.expr.v1alpha1.Type.FunctionType.result_type:type_name -> google.api.expr.v1alpha1.Type + 3, // 24: google.api.expr.v1alpha1.Type.FunctionType.arg_types:type_name -> google.api.expr.v1alpha1.Type + 3, // 25: google.api.expr.v1alpha1.Type.AbstractType.parameter_types:type_name -> google.api.expr.v1alpha1.Type + 3, // 26: google.api.expr.v1alpha1.Decl.IdentDecl.type:type_name -> google.api.expr.v1alpha1.Type + 19, // 27: google.api.expr.v1alpha1.Decl.IdentDecl.value:type_name -> google.api.expr.v1alpha1.Constant + 14, // 28: google.api.expr.v1alpha1.Decl.FunctionDecl.overloads:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl.Overload + 3, // 29: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.params:type_name -> google.api.expr.v1alpha1.Type + 3, // 30: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.result_type:type_name -> google.api.expr.v1alpha1.Type + 31, // [31:31] is the sub-list for method output_type + 31, // [31:31] is the sub-list for method input_type + 31, // [31:31] is the sub-list for extension type_name + 31, // [31:31] is the sub-list for extension extendee + 0, // [0:31] is the sub-list for field type_name +} + +func init() { file_google_api_expr_v1alpha1_checked_proto_init() } +func file_google_api_expr_v1alpha1_checked_proto_init() { + if File_google_api_expr_v1alpha1_checked_proto != nil { + return + } + file_google_api_expr_v1alpha1_syntax_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_api_expr_v1alpha1_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_ListType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_MapType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_FunctionType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Type_AbstractType); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_IdentDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Decl_FunctionDecl_Overload); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Type_Dyn)(nil), + (*Type_Null)(nil), + (*Type_Primitive)(nil), + (*Type_Wrapper)(nil), + (*Type_WellKnown)(nil), + (*Type_ListType_)(nil), + (*Type_MapType_)(nil), + (*Type_Function)(nil), + (*Type_MessageType)(nil), + (*Type_TypeParam)(nil), + (*Type_Type)(nil), + (*Type_Error)(nil), + (*Type_AbstractType_)(nil), + } + file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Decl_Ident)(nil), + (*Decl_Function)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_expr_v1alpha1_checked_proto_rawDesc, + NumEnums: 2, + NumMessages: 13, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_expr_v1alpha1_checked_proto_goTypes, + DependencyIndexes: file_google_api_expr_v1alpha1_checked_proto_depIdxs, + EnumInfos: file_google_api_expr_v1alpha1_checked_proto_enumTypes, + MessageInfos: file_google_api_expr_v1alpha1_checked_proto_msgTypes, + }.Build() + File_google_api_expr_v1alpha1_checked_proto = out.File + file_google_api_expr_v1alpha1_checked_proto_rawDesc = nil + file_google_api_expr_v1alpha1_checked_proto_goTypes = nil + file_google_api_expr_v1alpha1_checked_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go new file mode 100644 index 00000000..0a2ffb59 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go @@ -0,0 +1,580 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/expr/v1alpha1/eval.proto
+
+package expr
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	status "google.golang.org/genproto/googleapis/rpc/status"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The state of an evaluation.
+//
+// Can represent an initial, partial, or completed state of evaluation.
+type EvalState struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The unique values referenced in this message.
+	Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	// An ordered list of results.
+	//
+	// Tracks the flow of evaluation through the expression.
+	// May be sparse.
+	Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *EvalState) Reset() {
+	*x = EvalState{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *EvalState) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+	if x != nil {
+		return x.Results
+	}
+	return nil
+}
+
+// The value of an evaluated expression.
+type ExprValue struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An expression can resolve to a value, error or unknown.
+	//
+	// Types that are assignable to Kind:
+	//
+	//	*ExprValue_Value
+	//	*ExprValue_Error
+	//	*ExprValue_Unknown
+	Kind isExprValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *ExprValue) Reset() {
+	*x = ExprValue{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ExprValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
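+
+// Editor's note (illustration only, not part of the generated file): exactly
+// one of the Kind wrapper types defined below is set on an ExprValue, so
+// consumers typically switch on it. A minimal sketch, assuming `res` is an
+// *ExprValue produced by some evaluator and "fmt" is imported:
+//
+//	switch k := res.Kind.(type) {
+//	case *ExprValue_Value:
+//		fmt.Println("value:", k.Value)
+//	case *ExprValue_Error:
+//		fmt.Println("errors:", k.Error.GetErrors())
+//	case *ExprValue_Unknown:
+//		fmt.Println("unknown expr ids:", k.Unknown.GetExprs())
+//	}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.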
+func (*ExprValue) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *ExprValue) GetKind() isExprValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+	if x, ok := x.GetKind().(*ExprValue_Value); ok {
+		return x.Value
+	}
+	return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+	if x, ok := x.GetKind().(*ExprValue_Error); ok {
+		return x.Error
+	}
+	return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+	if x, ok := x.GetKind().(*ExprValue_Unknown); ok {
+		return x.Unknown
+	}
+	return nil
+}
+
+type isExprValue_Kind interface {
+	isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+	// A concrete value.
+	Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+	// The set of errors in the critical path of evaluation.
+	//
+	// Only errors in the critical path are included. For example,
+	// `(<error1> || true) && <error2>` will only result in `<error2>`,
+	// while `<error1> || <error2>` will result in both `<error1>` and
+	// `<error2>`.
+	//
+	// Errors caused by the presence of other errors are not included in the
+	// set. For example `<error1>.foo`, `foo(<error1>)`, and `<error1> + 1` will
+	// only result in `<error1>`.
+	//
+	// Multiple errors *might* be included when evaluation could result
+	// in different errors. For example `<error1> + <error2>` and
+	// `foo(<error1>, <error2>)` may result in `<error1>`, `<error2>` or both.
+	// The exact subset of errors included for this case is unspecified and
+	// depends on the implementation details of the evaluator.
+	Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+	// The set of unknowns in the critical path of evaluation.
+	//
+	// Unknown behaves identically to Error with regards to propagation.
+	// Specifically, only unknowns in the critical path are included, unknowns
+	// caused by the presence of other unknowns are not included, and multiple
+	// unknowns *might* be included when evaluation could result in
+	// different unknowns. For example:
+	//
+	//	(<unknown> || true) && <unknown2> -> <unknown2>
+	//	<unknown> || <unknown2> -> <unknown>
+	//	<unknown>.foo -> <unknown>
+	//	foo(<unknown>) -> <unknown>
+	//	<unknown> + <error> -> <unknown> or <error>
+	//
+	// Unknown takes precedence over Error in cases where a `Value` can short
+	// circuit the result:
+	//
+	//	<error> || <unknown> -> <unknown>
+	//	<error> && <unknown> -> <unknown>
+	//
+	// Errors take precedence in all other cases:
+	//
+	//	<unknown> + <error> -> <error>
+	//	foo(<unknown>, <error>) -> <error>
+	Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+// A set of errors.
+//
+// The errors included depend on the context. See `ExprValue.error`.
+type ErrorSet struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The errors in the set.
+	Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *ErrorSet) Reset() {
+	*x = ErrorSet{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ErrorSet) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*status.Status {
+	if x != nil {
+		return x.Errors
+	}
+	return nil
+}
+
+// A set of expressions for which the value is unknown.
+//
+// The unknowns included depend on the context. See `ExprValue.unknown`.
+type UnknownSet struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The ids of the expressions with unknown values.
+	Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+}
+
+func (x *UnknownSet) Reset() {
+	*x = UnknownSet{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *UnknownSet) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+	if x != nil {
+		return x.Exprs
+	}
+	return nil
+}
+
+// A single evaluation result.
+type EvalState_Result struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The id of the expression this result is for.
+	Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+	// The index in `values` of the resulting value.
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EvalState_Result) Reset() { + *x = EvalState_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvalState_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvalState_Result) ProtoMessage() {} + +func (x *EvalState_Result) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead. +func (*EvalState_Result) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *EvalState_Result) GetExpr() int64 { + if x != nil { + return x.Expr + } + return 0 +} + +func (x *EvalState_Result) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +var File_google_api_expr_v1alpha1_eval_proto protoreflect.FileDescriptor + +var file_google_api_expr_v1alpha1_eval_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, + 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, + 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2, + 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0xca, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x6c, 0x0a, 0x1c, + 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x45, 0x76, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_expr_v1alpha1_eval_proto_rawDescOnce sync.Once + file_google_api_expr_v1alpha1_eval_proto_rawDescData = file_google_api_expr_v1alpha1_eval_proto_rawDesc +) + +func file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP() []byte { + file_google_api_expr_v1alpha1_eval_proto_rawDescOnce.Do(func() { + file_google_api_expr_v1alpha1_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_eval_proto_rawDescData) + }) + return file_google_api_expr_v1alpha1_eval_proto_rawDescData +} + +var file_google_api_expr_v1alpha1_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_api_expr_v1alpha1_eval_proto_goTypes = []interface{}{ + (*EvalState)(nil), // 0: google.api.expr.v1alpha1.EvalState + (*ExprValue)(nil), // 1: google.api.expr.v1alpha1.ExprValue + (*ErrorSet)(nil), // 2: google.api.expr.v1alpha1.ErrorSet + (*UnknownSet)(nil), // 3: google.api.expr.v1alpha1.UnknownSet + (*EvalState_Result)(nil), // 4: google.api.expr.v1alpha1.EvalState.Result + (*Value)(nil), // 5: google.api.expr.v1alpha1.Value + (*status.Status)(nil), // 6: google.rpc.Status +} +var file_google_api_expr_v1alpha1_eval_proto_depIdxs = []int32{ + 1, // 0: 
google.api.expr.v1alpha1.EvalState.values:type_name -> google.api.expr.v1alpha1.ExprValue + 4, // 1: google.api.expr.v1alpha1.EvalState.results:type_name -> google.api.expr.v1alpha1.EvalState.Result + 5, // 2: google.api.expr.v1alpha1.ExprValue.value:type_name -> google.api.expr.v1alpha1.Value + 2, // 3: google.api.expr.v1alpha1.ExprValue.error:type_name -> google.api.expr.v1alpha1.ErrorSet + 3, // 4: google.api.expr.v1alpha1.ExprValue.unknown:type_name -> google.api.expr.v1alpha1.UnknownSet + 6, // 5: google.api.expr.v1alpha1.ErrorSet.errors:type_name -> google.rpc.Status + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_api_expr_v1alpha1_eval_proto_init() } +func file_google_api_expr_v1alpha1_eval_proto_init() { + if File_google_api_expr_v1alpha1_eval_proto != nil { + return + } + file_google_api_expr_v1alpha1_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_api_expr_v1alpha1_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExprValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnknownSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvalState_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*ExprValue_Value)(nil), + (*ExprValue_Error)(nil), + (*ExprValue_Unknown)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_expr_v1alpha1_eval_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_expr_v1alpha1_eval_proto_goTypes, + DependencyIndexes: file_google_api_expr_v1alpha1_eval_proto_depIdxs, + MessageInfos: file_google_api_expr_v1alpha1_eval_proto_msgTypes, + }.Build() + File_google_api_expr_v1alpha1_eval_proto = out.File + file_google_api_expr_v1alpha1_eval_proto_rawDesc = nil + file_google_api_expr_v1alpha1_eval_proto_goTypes = nil + file_google_api_expr_v1alpha1_eval_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go new file mode 100644 index 
00000000..57aaa2c9
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
@@ -0,0 +1,275 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/expr/v1alpha1/explain.proto
+
+package expr
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Values of intermediate expressions produced when evaluating an expression.
+// Deprecated, use `EvalState` instead.
+//
+// Deprecated: Do not use.
+type Explain struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// All of the observed values.
+	//
+	// The field value_index is an index in the values list.
+	// Separating values from steps is needed to remove redundant values.
+	Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	// List of steps.
+	//
+	// Repeated evaluations of the same expression generate new ExprStep
+	// instances. The order of such ExprStep instances matches the order of
+	// elements returned by Comprehension.iter_range.
+	ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+	*x = Explain{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Explain) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+	return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+	if x != nil {
+		return x.Values
+	}
+	return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+	if x != nil {
+		return x.ExprSteps
+	}
+	return nil
+}
+
+// ID and value index of one step.
+type Explain_ExprStep struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// ID of corresponding Expr node.
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Index of the value in the values list. + ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"` +} + +func (x *Explain_ExprStep) Reset() { + *x = Explain_ExprStep{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Explain_ExprStep) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Explain_ExprStep) ProtoMessage() {} + +func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead. +func (*Explain_ExprStep) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Explain_ExprStep) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Explain_ExprStep) GetValueIndex() int32 { + if x != nil { + return x.ValueIndex + } + return 0 +} + +var File_google_api_expr_v1alpha1_explain_proto protoreflect.FileDescriptor + +var file_google_api_expr_v1alpha1_explain_proto_rawDesc = []byte{ + 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x1a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, + 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xce, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x37, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x49, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, + 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, + 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72, + 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x6f, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61, + 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_api_expr_v1alpha1_explain_proto_rawDescOnce sync.Once + file_google_api_expr_v1alpha1_explain_proto_rawDescData = file_google_api_expr_v1alpha1_explain_proto_rawDesc +) + +func file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP() []byte { + file_google_api_expr_v1alpha1_explain_proto_rawDescOnce.Do(func() { + file_google_api_expr_v1alpha1_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_explain_proto_rawDescData) + }) + return file_google_api_expr_v1alpha1_explain_proto_rawDescData +} + +var file_google_api_expr_v1alpha1_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_expr_v1alpha1_explain_proto_goTypes = []interface{}{ + (*Explain)(nil), // 0: google.api.expr.v1alpha1.Explain + (*Explain_ExprStep)(nil), // 1: google.api.expr.v1alpha1.Explain.ExprStep + (*Value)(nil), // 2: google.api.expr.v1alpha1.Value +} +var file_google_api_expr_v1alpha1_explain_proto_depIdxs = []int32{ + 2, // 0: google.api.expr.v1alpha1.Explain.values:type_name -> google.api.expr.v1alpha1.Value + 1, // 1: google.api.expr.v1alpha1.Explain.expr_steps:type_name -> google.api.expr.v1alpha1.Explain.ExprStep + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_api_expr_v1alpha1_explain_proto_init() } +func file_google_api_expr_v1alpha1_explain_proto_init() { + if File_google_api_expr_v1alpha1_explain_proto != nil { + return + } + file_google_api_expr_v1alpha1_value_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_api_expr_v1alpha1_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Explain_ExprStep); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_expr_v1alpha1_explain_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_expr_v1alpha1_explain_proto_goTypes, + DependencyIndexes: file_google_api_expr_v1alpha1_explain_proto_depIdxs, + MessageInfos: file_google_api_expr_v1alpha1_explain_proto_msgTypes, + }.Build() + File_google_api_expr_v1alpha1_explain_proto = out.File + file_google_api_expr_v1alpha1_explain_proto_rawDesc = nil + 
file_google_api_expr_v1alpha1_explain_proto_goTypes = nil + file_google_api_expr_v1alpha1_explain_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go new file mode 100644 index 00000000..c90c6015 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go @@ -0,0 +1,2040 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/expr/v1alpha1/syntax.proto + +package expr + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// CEL component specifier. +type SourceInfo_Extension_Component int32 + +const ( + // Unspecified, default. + SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0 + // Parser. Converts a CEL string to an AST. + SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1 + // Type checker. Checks that references in an AST are defined and types + // agree. + SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2 + // Runtime. Evaluates a parsed and optionally checked CEL AST against a + // context. + SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3 +) + +// Enum value maps for SourceInfo_Extension_Component. 
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_expr_v1alpha1_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_google_api_expr_v1alpha1_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0, 0}
+}
+
+// An expression together with source information as returned by the parser.
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The parsed expression.
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ // The source info derived from input that generated the parsed `expr`.
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+// An abstract representation of a common expression.
+//
+// Expressions are abstractly represented as a collection of identifiers,
+// select statements, function calls, literals, and comprehensions. All
+// operators with the exception of the '.' operator are modelled as function
+// calls. This makes it easy to introduce new operators into the existing AST.
+//
+// All references within expressions must resolve to a
+// [Decl][google.api.expr.v1alpha1.Decl] provided at type-check for an
+// expression to be valid. A reference may either be a bare identifier `name` or
+// a qualified identifier `google.api.name`.
References may either refer to a +// value or a function declaration. +// +// For example, the expression `google.api.name.startsWith('expr')` references +// the declaration `google.api.name` within a +// [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and the +// function declaration `startsWith`. +type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. An id assigned to this node by the parser which is unique in a + // given expression tree. This is used to associate type information and other + // attributes to a node in the parse tree. + Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // Required. Variants of expressions. + // + // Types that are assignable to ExprKind: + // + // *Expr_ConstExpr + // *Expr_IdentExpr + // *Expr_SelectExpr + // *Expr_CallExpr + // *Expr_ListExpr + // *Expr_StructExpr + // *Expr_ComprehensionExpr + ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"` +} + +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. +func (*Expr) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1} +} + +func (x *Expr) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr) GetExprKind() isExpr_ExprKind { + if m != nil { + return m.ExprKind + } + return nil +} + +func (x *Expr) GetConstExpr() *Constant { + if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok { + return x.ConstExpr + } + return nil +} + +func (x *Expr) GetIdentExpr() *Expr_Ident { + if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok { + return x.IdentExpr + } + return nil +} + +func (x *Expr) GetSelectExpr() *Expr_Select { + if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok { + return x.SelectExpr + } + return nil +} + +func (x *Expr) GetCallExpr() *Expr_Call { + if x, ok := x.GetExprKind().(*Expr_CallExpr); ok { + return x.CallExpr + } + return nil +} + +func (x *Expr) GetListExpr() *Expr_CreateList { + if x, ok := x.GetExprKind().(*Expr_ListExpr); ok { + return x.ListExpr + } + return nil +} + +func (x *Expr) GetStructExpr() *Expr_CreateStruct { + if x, ok := x.GetExprKind().(*Expr_StructExpr); ok { + return x.StructExpr + } + return nil +} + +func (x *Expr) GetComprehensionExpr() *Expr_Comprehension { + if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok { + return x.ComprehensionExpr + } + return nil +} + +type isExpr_ExprKind interface { + isExpr_ExprKind() +} + +type Expr_ConstExpr struct { + // A literal expression. + ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"` +} + +type Expr_IdentExpr struct { + // An identifier expression. + IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"` +} + +type Expr_SelectExpr struct { + // A field selection expression, e.g. 
`request.auth`.
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ // A call expression, including calls to predefined functions and operators.
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ // A list creation expression.
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ // A map or message creation expression.
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ // A comprehension expression.
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+// Represents a primitive literal.
+//
+// Named 'Constant' here for backwards compatibility.
+//
+// This is similar to the primitives supported in the well-known type
+// `google.protobuf.Value`, but richer so it can represent CEL's full range of
+// primitives.
+//
+// Lists and structs are not included as constants as these aggregate types may
+// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require
+// evaluation and are thus not constant.
+//
+// Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`,
+// `true`, `null`.
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The valid constant kinds.
+ //
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
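+//
+// NOTE (editorial sketch, not part of the generated API surface): the Expr
+// and Constant docs above describe how `google.api.name.startsWith('expr')`
+// is represented. One plausible shape of that AST, using only the types in
+// this file (node ids are illustrative; a real parser assigns its own):
+//
+//	ast := &Expr{Id: 5, ExprKind: &Expr_CallExpr{CallExpr: &Expr_Call{
+//		// `startsWith` is a receiver-style call; its target is the
+//		// select chain google -> api -> name.
+//		Target: &Expr{Id: 4, ExprKind: &Expr_SelectExpr{SelectExpr: &Expr_Select{
+//			Operand: &Expr{Id: 3, ExprKind: &Expr_SelectExpr{SelectExpr: &Expr_Select{
+//				Operand: &Expr{Id: 2, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "google"}}},
+//				Field:   "api",
+//			}}},
+//			Field: "name",
+//		}}},
+//		Function: "startsWith",
+//		Args: []*Expr{{Id: 1, ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{
+//			ConstantKind: &Constant_StringValue{StringValue: "expr"},
+//		}}}},
+//	}}}
+//
+// Reading such a node back uses the generated oneof accessors, e.g.
+// ast.GetCallExpr(), which return nil/zero when a different kind is set.
+//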
+func (*Constant) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{2} +} + +func (m *Constant) GetConstantKind() isConstant_ConstantKind { + if m != nil { + return m.ConstantKind + } + return nil +} + +func (x *Constant) GetNullValue() structpb.NullValue { + if x, ok := x.GetConstantKind().(*Constant_NullValue); ok { + return x.NullValue + } + return structpb.NullValue_NULL_VALUE +} + +func (x *Constant) GetBoolValue() bool { + if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Constant) GetInt64Value() int64 { + if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Constant) GetUint64Value() uint64 { + if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Constant) GetDoubleValue() float64 { + if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Constant) GetStringValue() string { + if x, ok := x.GetConstantKind().(*Constant_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Constant) GetBytesValue() []byte { + if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok { + return x.BytesValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetDurationValue() *durationpb.Duration { + if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok { + return x.DurationValue + } + return nil +} + +// Deprecated: Do not use. +func (x *Constant) GetTimestampValue() *timestamppb.Timestamp { + if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok { + return x.TimestampValue + } + return nil +} + +type isConstant_ConstantKind interface { + isConstant_ConstantKind() +} + +type Constant_NullValue struct { + // null value. + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Constant_BoolValue struct { + // boolean value. + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Constant_Int64Value struct { + // int64 value. + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Constant_Uint64Value struct { + // uint64 value. + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Constant_DoubleValue struct { + // double value. + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Constant_StringValue struct { + // string value. + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Constant_BytesValue struct { + // bytes value. + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Constant_DurationValue struct { + // protobuf.Duration value. + // + // Deprecated: duration is no longer considered a builtin cel type. + // + // Deprecated: Do not use. + DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"` +} + +type Constant_TimestampValue struct { + // protobuf.Timestamp value. + // + // Deprecated: timestamp is no longer considered a builtin cel type. + // + // Deprecated: Do not use. 
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+// Source information collected at parse time.
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The syntax version of the source, e.g. `cel1`.
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ // The location name. All position information attached to an expression is
+ // relative to this location.
+ //
+ // The location could be a file, UI element, or similar. For example,
+ // `acme/app/AnvilPolicy.cel`.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // Monotonically increasing list of code point offsets where newlines
+ // `\n` appear.
+ //
+ // The line number of a given position is the index `i` where for a given
+ // `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The
+ // column may be derived from `id_positions[id] - line_offsets[i]`.
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ // A map from the parse node id (e.g. `Expr.id`) to the code point offset
+ // within the source.
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ // A map from the parse node id where a macro replacement was made to the
+ // call `Expr` that resulted in a macro expansion.
+ //
+ // For example, `has(value.field)` is a function call that is replaced by a
+ // `test_only` field selection in the AST. Likewise, the call
+ // `list.exists(e, e > 10)` translates to a comprehension expression. The key
+ // in the map corresponds to the expression id of the expanded macro, and the
+ // value is the call `Expr` that was replaced.
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A list of tags for extensions that were used while parsing or type checking
+ // the source expression. For example, optimizations that require special
+ // runtime support may be specified.
+ //
+ // These are used to check feature support between components in separate
+ // implementations. This can be used to either skip redundant work or
+ // report an error if the extension is unsupported.
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+// A specific position in source.
+type SourcePosition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The source location name (e.g. file name).
+ Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ // The UTF-8 code unit offset.
+ Offset int32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ // The 1-based index of the starting line in the source text
+ // where the issue occurs, or 0 if unknown.
+ Line int32 `protobuf:"varint,3,opt,name=line,proto3" json:"line,omitempty"`
+ // The 0-based index of the starting position within the line of source text
+ // where the issue occurs. Only meaningful if line is nonzero.
+ Column int32 `protobuf:"varint,4,opt,name=column,proto3" json:"column,omitempty"`
+}
+
+func (x *SourcePosition) Reset() {
+ *x = SourcePosition{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourcePosition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourcePosition) ProtoMessage() {}
+
+func (x *SourcePosition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourcePosition.ProtoReflect.Descriptor instead.
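+//
+// NOTE (editorial sketch, not part of the generated API surface): the
+// `line_offsets` comment on SourceInfo above implies a simple recovery of
+// line and column from a code point offset. A minimal sketch following that
+// formula (off-by-one conventions at line starts vary by implementation):
+//
+//	func lineAndColumn(lineOffsets []int32, pos int32) (line int, col int32) {
+//		line = 1
+//		lastNewline := int32(0)
+//		for _, off := range lineOffsets {
+//			if off >= pos {
+//				break // pos precedes this newline, so it lies on the current line
+//			}
+//			line++
+//			lastNewline = off
+//		}
+//		return line, pos - lastNewline // column per the doc formula above
+//	}
+//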
+func (*SourcePosition) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{4} +} + +func (x *SourcePosition) GetLocation() string { + if x != nil { + return x.Location + } + return "" +} + +func (x *SourcePosition) GetOffset() int32 { + if x != nil { + return x.Offset + } + return 0 +} + +func (x *SourcePosition) GetLine() int32 { + if x != nil { + return x.Line + } + return 0 +} + +func (x *SourcePosition) GetColumn() int32 { + if x != nil { + return x.Column + } + return 0 +} + +// An identifier expression. e.g. `request`. +type Expr_Ident struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Holds a single, unqualified identifier, possibly preceded by a + // '.'. + // + // Qualified names are represented by the + // [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *Expr_Ident) Reset() { + *x = Expr_Ident{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Ident) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Ident) ProtoMessage() {} + +func (x *Expr_Ident) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead. +func (*Expr_Ident) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Expr_Ident) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +// A field selection expression. e.g. `request.auth`. +type Expr_Select struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The target of the selection expression. + // + // For example, in the select expression `request.auth`, the `request` + // portion of the expression is the `operand`. + Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"` + // Required. The name of the field to select. + // + // For example, in the select expression `request.auth`, the `auth` portion + // of the expression would be the `field`. + Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"` + // Whether the select is to be interpreted as a field presence test. + // + // This results from the macro `has(request.auth)`. 
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+// A call expression, including calls to predefined functions and operators.
+//
+// For example, `value == 10`, `size(map_value)`.
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The target of a method call-style expression. For example, `x` in
+ // `x.f()`.
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ // Required. The name of the function or method being called.
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ // The arguments.
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+// A list creation expression.
+//
+// Lists may either be homogeneous, e.g. `[1, 2, 3]`, or heterogeneous, e.g.
+// `dyn([1, 'hello', 2.0])`
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The elements part of the list.
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"` + // The indices within the elements list which are marked as optional + // elements. + // + // When an optional-typed value is present, the value it contains + // is included in the list. If the optional-typed value is absent, the list + // element is omitted from the CreateList result. + OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"` +} + +func (x *Expr_CreateList) Reset() { + *x = Expr_CreateList{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateList) ProtoMessage() {} + +func (x *Expr_CreateList) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead. +func (*Expr_CreateList) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 3} +} + +func (x *Expr_CreateList) GetElements() []*Expr { + if x != nil { + return x.Elements + } + return nil +} + +func (x *Expr_CreateList) GetOptionalIndices() []int32 { + if x != nil { + return x.OptionalIndices + } + return nil +} + +// A map or message creation expression. +// +// Maps are constructed as `{'key_name': 'value'}`. Message construction is +// similar, but prefixed with a type name and composed of field ids: +// `types.MyType{field_id: 'value'}`. +type Expr_CreateStruct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type name of the message to be created, empty when creating map + // literals. + MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"` + // The entries in the creation expression. + Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *Expr_CreateStruct) Reset() { + *x = Expr_CreateStruct{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct) ProtoMessage() {} + +func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead. 
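+//
+// NOTE (editorial sketch, not part of the generated API surface): per the
+// CreateStruct doc above, the map literal `{'key_name': 'value'}` would use
+// an empty MessageName and one map-key entry (node ids illustrative):
+//
+//	mapLit := &Expr_CreateStruct{
+//		Entries: []*Expr_CreateStruct_Entry{{
+//			Id: 2,
+//			KeyKind: &Expr_CreateStruct_Entry_MapKey{MapKey: &Expr{Id: 3,
+//				ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_StringValue{StringValue: "key_name"}}}}},
+//			Value: &Expr{Id: 4,
+//				ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_StringValue{StringValue: "value"}}}},
+//		}},
+//	}
+//
+// A message literal such as `types.MyType{field_id: 'value'}` would instead
+// set MessageName and use the Expr_CreateStruct_Entry_FieldKey key kind.
+//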
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+// A comprehension expression applied to a list or map.
+//
+// Comprehensions are not part of the core syntax, but enabled with macros.
+// A macro matches a specific call signature within a parsed AST and replaces
+// the call with an alternate AST block. Macro expansion happens at parse
+// time.
+//
+// The following macros are supported within CEL:
+//
+// Aggregate type macros may be applied to all elements in a list or all keys
+// in a map:
+//
+// - `all`, `exists`, `exists_one` - test a predicate expression against
+// the inputs and return `true` if the predicate is satisfied for all,
+// any, or only one value, e.g. `list.all(x, x < 10)`.
+// - `filter` - test a predicate expression against the inputs and return
+// the subset of elements which satisfy the predicate:
+// `payments.filter(p, p > 1000)`.
+// - `map` - apply an expression to all elements in the input and return the
+// output aggregate type: `[1, 2, 3].map(i, i * i)`.
+//
+// The `has(m.x)` macro tests whether the property `x` is present in struct
+// `m`. The semantics of this macro depend on the type of `m`. For proto2
+// messages `has(m.x)` is defined as 'defined, but not set'. For proto3, the
+// macro tests whether the property is set to its default. For map and struct
+// types, the macro tests whether the property `x` is defined on `m`.
+//
+// Evaluation of comprehensions for the standard environment macros can best
+// be visualized as the following pseudocode:
+//
+// ```
+// let `accu_var` = `accu_init`
+//
+// for (let `iter_var` in `iter_range`) {
+// if (!`loop_condition`) {
+// break
+// }
+// `accu_var` = `loop_step`
+// }
+//
+// return `result`
+// ```
+//
+// Comprehensions for the optional V2 macros, which support map-to-map
+// translation, differ slightly from the standard environment macros in that
+// they expose the key or index in addition to the value for each list or
+// map entry:
+//
+// ```
+// let `accu_var` = `accu_init`
+//
+// for (let `iter_var`, `iter_var2` in `iter_range`) {
+// if (!`loop_condition`) {
+// break
+// }
+// `accu_var` = `loop_step`
+// }
+//
+// return `result`
+// ```
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the first iteration variable.
+ // When the iter_range is a list, this variable is the list element.
+ // When the iter_range is a map, this variable is the map entry key.
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ // The name of the second iteration variable, empty if not set.
+ // When the iter_range is a list, this variable is the integer index.
+ // When the iter_range is a map, this variable is the map entry value.
+ // This field is only set for comprehension v2 macros.
+ IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"`
+ // The range over which the comprehension iterates.
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ // The name of the variable used for accumulation of the result.
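+//
+// NOTE (editorial sketch, not normative): mapping the pseudocode above onto
+// the fields below for `list.all(x, x < 10)`, as one plausible expansion
+// (the accumulator name is implementation-defined):
+//
+//	iter_range:     list
+//	iter_var:       "x"
+//	accu_var:       an accumulator name such as "__result__"
+//	accu_init:      true
+//	loop_condition: the accumulated result so far (stop once it is false)
+//	loop_step:      accu_var && (x < 10)
+//	result:         accu_var
+//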
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"` + // The initial value of the accumulator. + AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"` + // An expression which can contain iter_var, iter_var2, and accu_var. + // + // Returns false when the result has been computed and may be used as + // a hint to short-circuit the remainder of the comprehension. + LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"` + // An expression which can contain iter_var, iter_var2, and accu_var. + // + // Computes the next value of accu_var. + LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"` + // An expression which can contain accu_var. + // + // Computes the result. + Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"` +} + +func (x *Expr_Comprehension) Reset() { + *x = Expr_Comprehension{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_Comprehension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_Comprehension) ProtoMessage() {} + +func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead. +func (*Expr_Comprehension) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 5} +} + +func (x *Expr_Comprehension) GetIterVar() string { + if x != nil { + return x.IterVar + } + return "" +} + +func (x *Expr_Comprehension) GetIterVar2() string { + if x != nil { + return x.IterVar2 + } + return "" +} + +func (x *Expr_Comprehension) GetIterRange() *Expr { + if x != nil { + return x.IterRange + } + return nil +} + +func (x *Expr_Comprehension) GetAccuVar() string { + if x != nil { + return x.AccuVar + } + return "" +} + +func (x *Expr_Comprehension) GetAccuInit() *Expr { + if x != nil { + return x.AccuInit + } + return nil +} + +func (x *Expr_Comprehension) GetLoopCondition() *Expr { + if x != nil { + return x.LoopCondition + } + return nil +} + +func (x *Expr_Comprehension) GetLoopStep() *Expr { + if x != nil { + return x.LoopStep + } + return nil +} + +func (x *Expr_Comprehension) GetResult() *Expr { + if x != nil { + return x.Result + } + return nil +} + +// Represents an entry. +type Expr_CreateStruct_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. An id assigned to this node by the parser which is unique + // in a given expression tree. This is used to associate type + // information and other attributes to the node. + Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // The `Entry` key kinds. + // + // Types that are assignable to KeyKind: + // + // *Expr_CreateStruct_Entry_FieldKey + // *Expr_CreateStruct_Entry_MapKey + KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"` + // Required. The value assigned to the key. 
+ // + // If the optional_entry field is true, the expression must resolve to an + // optional-typed value. If the optional value is present, the key will be + // set; however, if the optional value is absent, the key will be unset. + Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` + // Whether the key-value pair is optional. + OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"` +} + +func (x *Expr_CreateStruct_Entry) Reset() { + *x = Expr_CreateStruct_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Expr_CreateStruct_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Expr_CreateStruct_Entry) ProtoMessage() {} + +func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead. +func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4, 0} +} + +func (x *Expr_CreateStruct_Entry) GetId() int64 { + if x != nil { + return x.Id + } + return 0 +} + +func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind { + if m != nil { + return m.KeyKind + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetFieldKey() string { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok { + return x.FieldKey + } + return "" +} + +func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr { + if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok { + return x.MapKey + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetValue() *Expr { + if x != nil { + return x.Value + } + return nil +} + +func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool { + if x != nil { + return x.OptionalEntry + } + return false +} + +type isExpr_CreateStruct_Entry_KeyKind interface { + isExpr_CreateStruct_Entry_KeyKind() +} + +type Expr_CreateStruct_Entry_FieldKey struct { + // The field key for a message creator statement. + FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"` +} + +type Expr_CreateStruct_Entry_MapKey struct { + // The key expression for a map creation statement. + MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"` +} + +func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {} + +func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {} + +// An extension that was requested for the source expression. +type SourceInfo_Extension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for the extension. Example: constant_folding + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // If set, the listed components must understand the extension for the + // expression to evaluate correctly. + // + // This field has set semantics, repeated values should be deduplicated. 
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=google.api.expr.v1alpha1.SourceInfo_Extension_Component" json:"affected_components,omitempty"` + // Version info. May be skipped if it isn't meaningful for the extension. + // (for example constant_folding might always be v0.0). + Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *SourceInfo_Extension) Reset() { + *x = SourceInfo_Extension{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension) ProtoMessage() {} + +func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead. +func (*SourceInfo_Extension) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *SourceInfo_Extension) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component { + if x != nil { + return x.AffectedComponents + } + return nil +} + +func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version { + if x != nil { + return x.Version + } + return nil +} + +// Version +type SourceInfo_Extension_Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Major version changes indicate different required support level from + // the required components. + Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + // Minor version changes must not change the observed behavior from + // existing implementations, but may be provided informationally. + Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` +} + +func (x *SourceInfo_Extension_Version) Reset() { + *x = SourceInfo_Extension_Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceInfo_Extension_Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceInfo_Extension_Version) ProtoMessage() {} + +func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead. 
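+//
+// NOTE (editorial sketch, not part of the generated API surface): a consumer
+// could gate evaluation on the extensions recorded above, e.g. with a
+// hypothetical helper (assumes "fmt" is imported):
+//
+//	func checkRuntimeExtensions(info *SourceInfo, supported map[string]bool) error {
+//		for _, ext := range info.GetExtensions() {
+//			for _, c := range ext.GetAffectedComponents() {
+//				if c == SourceInfo_Extension_COMPONENT_RUNTIME && !supported[ext.GetId()] {
+//					return fmt.Errorf("expression requires unsupported runtime extension %q", ext.GetId())
+//				}
+//			}
+//		}
+//		return nil
+//	}
+//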
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0, 0} +} + +func (x *SourceInfo_Extension_Version) GetMajor() int64 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *SourceInfo_Extension_Version) GetMinor() int64 { + if x != nil { + return x.Minor + } + return 0 +} + +var File_google_api_expr_v1alpha1_syntax_proto protoreflect.FileDescriptor + +var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, + 0x32, 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, + 0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, + 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x0d, 0x0a, 0x04, 0x45, + 0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, + 0x48, 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x42, 0x0a, 0x09, 0x63, 0x61, 0x6c, + 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, + 0x6c, 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x48, 0x0a, + 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, + 0x69, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x5d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, + 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x1a, 0x75, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x38, 0x0a, + 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x8e, 0x01, 0x0a, 0x04, 0x43, + 0x61, 0x6c, 0x6c, 0x12, 0x36, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, + 0x75, 0x6e, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x73, 0x0a, 0x0a, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, + 0x0f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, + 0x1a, 0xdb, 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x1a, 0xda, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x08, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x6d, 0x61, + 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, + 0x61, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0x9a, + 0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 
0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x69, + 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x32, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, 0x74, + 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, 0x5f, + 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, 0x56, + 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, 0x12, + 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, + 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x73, + 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, 0x53, + 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, + 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x65, + 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, + 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, + 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, + 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x48, + 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x63, + 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x8c, 0x07, 0x0a, + 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, 0x61, + 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 
0x78, 0x70, + 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, 0x03, 0x0a, 0x09, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, 0x61, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, + 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, + 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, + 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, + 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, + 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, + 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x1a, 0x3c, + 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f, + 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, + 0x52, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, + 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a, + 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53, + 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce sync.Once + file_google_api_expr_v1alpha1_syntax_proto_rawDescData = file_google_api_expr_v1alpha1_syntax_proto_rawDesc +) + +func file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP() []byte { + file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce.Do(func() { + file_google_api_expr_v1alpha1_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_syntax_proto_rawDescData) + }) + return file_google_api_expr_v1alpha1_syntax_proto_rawDescData +} + +var file_google_api_expr_v1alpha1_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_expr_v1alpha1_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_expr_v1alpha1_syntax_proto_goTypes = []interface{}{ + (SourceInfo_Extension_Component)(0), // 0: google.api.expr.v1alpha1.SourceInfo.Extension.Component + (*ParsedExpr)(nil), // 1: google.api.expr.v1alpha1.ParsedExpr + (*Expr)(nil), // 2: google.api.expr.v1alpha1.Expr + (*Constant)(nil), // 3: google.api.expr.v1alpha1.Constant + (*SourceInfo)(nil), // 4: google.api.expr.v1alpha1.SourceInfo + (*SourcePosition)(nil), // 5: google.api.expr.v1alpha1.SourcePosition + (*Expr_Ident)(nil), // 6: google.api.expr.v1alpha1.Expr.Ident + (*Expr_Select)(nil), // 7: google.api.expr.v1alpha1.Expr.Select + (*Expr_Call)(nil), // 8: google.api.expr.v1alpha1.Expr.Call + (*Expr_CreateList)(nil), // 9: google.api.expr.v1alpha1.Expr.CreateList + (*Expr_CreateStruct)(nil), // 10: google.api.expr.v1alpha1.Expr.CreateStruct + (*Expr_Comprehension)(nil), // 11: google.api.expr.v1alpha1.Expr.Comprehension + (*Expr_CreateStruct_Entry)(nil), // 12: google.api.expr.v1alpha1.Expr.CreateStruct.Entry + (*SourceInfo_Extension)(nil), // 13: google.api.expr.v1alpha1.SourceInfo.Extension + nil, // 14: google.api.expr.v1alpha1.SourceInfo.PositionsEntry + nil, // 15: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry + (*SourceInfo_Extension_Version)(nil), // 16: google.api.expr.v1alpha1.SourceInfo.Extension.Version + 
(structpb.NullValue)(0), // 17: google.protobuf.NullValue + (*durationpb.Duration)(nil), // 18: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp +} +var file_google_api_expr_v1alpha1_syntax_proto_depIdxs = []int32{ + 2, // 0: google.api.expr.v1alpha1.ParsedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr + 4, // 1: google.api.expr.v1alpha1.ParsedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo + 3, // 2: google.api.expr.v1alpha1.Expr.const_expr:type_name -> google.api.expr.v1alpha1.Constant + 6, // 3: google.api.expr.v1alpha1.Expr.ident_expr:type_name -> google.api.expr.v1alpha1.Expr.Ident + 7, // 4: google.api.expr.v1alpha1.Expr.select_expr:type_name -> google.api.expr.v1alpha1.Expr.Select + 8, // 5: google.api.expr.v1alpha1.Expr.call_expr:type_name -> google.api.expr.v1alpha1.Expr.Call + 9, // 6: google.api.expr.v1alpha1.Expr.list_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateList + 10, // 7: google.api.expr.v1alpha1.Expr.struct_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct + 11, // 8: google.api.expr.v1alpha1.Expr.comprehension_expr:type_name -> google.api.expr.v1alpha1.Expr.Comprehension + 17, // 9: google.api.expr.v1alpha1.Constant.null_value:type_name -> google.protobuf.NullValue + 18, // 10: google.api.expr.v1alpha1.Constant.duration_value:type_name -> google.protobuf.Duration + 19, // 11: google.api.expr.v1alpha1.Constant.timestamp_value:type_name -> google.protobuf.Timestamp + 14, // 12: google.api.expr.v1alpha1.SourceInfo.positions:type_name -> google.api.expr.v1alpha1.SourceInfo.PositionsEntry + 15, // 13: google.api.expr.v1alpha1.SourceInfo.macro_calls:type_name -> google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry + 13, // 14: google.api.expr.v1alpha1.SourceInfo.extensions:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension + 2, // 15: google.api.expr.v1alpha1.Expr.Select.operand:type_name -> google.api.expr.v1alpha1.Expr + 2, // 16: google.api.expr.v1alpha1.Expr.Call.target:type_name -> google.api.expr.v1alpha1.Expr + 2, // 17: google.api.expr.v1alpha1.Expr.Call.args:type_name -> google.api.expr.v1alpha1.Expr + 2, // 18: google.api.expr.v1alpha1.Expr.CreateList.elements:type_name -> google.api.expr.v1alpha1.Expr + 12, // 19: google.api.expr.v1alpha1.Expr.CreateStruct.entries:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct.Entry + 2, // 20: google.api.expr.v1alpha1.Expr.Comprehension.iter_range:type_name -> google.api.expr.v1alpha1.Expr + 2, // 21: google.api.expr.v1alpha1.Expr.Comprehension.accu_init:type_name -> google.api.expr.v1alpha1.Expr + 2, // 22: google.api.expr.v1alpha1.Expr.Comprehension.loop_condition:type_name -> google.api.expr.v1alpha1.Expr + 2, // 23: google.api.expr.v1alpha1.Expr.Comprehension.loop_step:type_name -> google.api.expr.v1alpha1.Expr + 2, // 24: google.api.expr.v1alpha1.Expr.Comprehension.result:type_name -> google.api.expr.v1alpha1.Expr + 2, // 25: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.map_key:type_name -> google.api.expr.v1alpha1.Expr + 2, // 26: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.value:type_name -> google.api.expr.v1alpha1.Expr + 0, // 27: google.api.expr.v1alpha1.SourceInfo.Extension.affected_components:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension.Component + 16, // 28: google.api.expr.v1alpha1.SourceInfo.Extension.version:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension.Version + 2, // 29: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry.value:type_name -> google.api.expr.v1alpha1.Expr + 30, 
// [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_google_api_expr_v1alpha1_syntax_proto_init() } +func file_google_api_expr_v1alpha1_syntax_proto_init() { + if File_google_api_expr_v1alpha1_syntax_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParsedExpr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Constant); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourcePosition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Ident); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Select); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Call); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_Comprehension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr_CreateStruct_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceInfo_Extension_Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Expr_ConstExpr)(nil), + (*Expr_IdentExpr)(nil), + (*Expr_SelectExpr)(nil), + (*Expr_CallExpr)(nil), + (*Expr_ListExpr)(nil), + (*Expr_StructExpr)(nil), + (*Expr_ComprehensionExpr)(nil), + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*Constant_NullValue)(nil), + (*Constant_BoolValue)(nil), + (*Constant_Int64Value)(nil), + (*Constant_Uint64Value)(nil), + (*Constant_DoubleValue)(nil), + (*Constant_StringValue)(nil), + (*Constant_BytesValue)(nil), + (*Constant_DurationValue)(nil), + (*Constant_TimestampValue)(nil), + } + file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*Expr_CreateStruct_Entry_FieldKey)(nil), + (*Expr_CreateStruct_Entry_MapKey)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_expr_v1alpha1_syntax_proto_rawDesc, + NumEnums: 1, + NumMessages: 16, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_expr_v1alpha1_syntax_proto_goTypes, + DependencyIndexes: file_google_api_expr_v1alpha1_syntax_proto_depIdxs, + EnumInfos: file_google_api_expr_v1alpha1_syntax_proto_enumTypes, + MessageInfos: file_google_api_expr_v1alpha1_syntax_proto_msgTypes, + }.Build() + File_google_api_expr_v1alpha1_syntax_proto = out.File + file_google_api_expr_v1alpha1_syntax_proto_rawDesc = nil + file_google_api_expr_v1alpha1_syntax_proto_goTypes = nil + file_google_api_expr_v1alpha1_syntax_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go new file mode 100644 index 00000000..0a5ca6a1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go @@ -0,0 +1,721 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/expr/v1alpha1/value.proto + +package expr + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + structpb "google.golang.org/protobuf/types/known/structpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Represents a CEL value. +// +// This is similar to `google.protobuf.Value`, but can represent CEL's full +// range of values. +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The valid kinds of values. + // + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_BoolValue + // *Value_Int64Value + // *Value_Uint64Value + // *Value_DoubleValue + // *Value_StringValue + // *Value_BytesValue + // *Value_EnumValue + // *Value_ObjectValue + // *Value_MapValue + // *Value_ListValue + // *Value_TypeValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{0} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() structpb.NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return structpb.NullValue_NULL_VALUE +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetInt64Value() int64 { + if x, ok := x.GetKind().(*Value_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (x *Value) GetUint64Value() uint64 { + if x, ok := x.GetKind().(*Value_Uint64Value); ok { + return x.Uint64Value + } + return 0 +} + +func (x *Value) GetDoubleValue() float64 { + if x, ok := x.GetKind().(*Value_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBytesValue() []byte { + if x, ok := x.GetKind().(*Value_BytesValue); ok { + return x.BytesValue + } + return nil +} + +func (x *Value) GetEnumValue() *EnumValue { + if x, ok := x.GetKind().(*Value_EnumValue); ok { + return x.EnumValue + } + return nil +} + +func (x *Value) GetObjectValue() *anypb.Any { + if x, ok := x.GetKind().(*Value_ObjectValue); ok { + return x.ObjectValue + } + return nil +} + +func (x *Value) GetMapValue() *MapValue { + if x, ok := x.GetKind().(*Value_MapValue); ok { + return x.MapValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +func (x *Value) GetTypeValue() string { + if x, ok := x.GetKind().(*Value_TypeValue); ok { + return x.TypeValue + } + return "" +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + // Null value. + NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_BoolValue struct { + // Boolean value. + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_Int64Value struct { + // Signed integer value. + Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Value_Uint64Value struct { + // Unsigned integer value. + Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"` +} + +type Value_DoubleValue struct { + // Floating point value. + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Value_StringValue struct { + // UTF-8 string value. + StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BytesValue struct { + // Byte string value. + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +type Value_EnumValue struct { + // An enum value. + EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"` +} + +type Value_ObjectValue struct { + // The proto message backing an object value. + ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"` +} + +type Value_MapValue struct { + // Map value. 
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"` +} + +type Value_ListValue struct { + // List value. + ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"` +} + +type Value_TypeValue struct { + // Type value. + TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_Int64Value) isValue_Kind() {} + +func (*Value_Uint64Value) isValue_Kind() {} + +func (*Value_DoubleValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BytesValue) isValue_Kind() {} + +func (*Value_EnumValue) isValue_Kind() {} + +func (*Value_ObjectValue) isValue_Kind() {} + +func (*Value_MapValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (*Value_TypeValue) isValue_Kind() {} + +// An enum value. +type EnumValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the enum type. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The value of the enum. + Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *EnumValue) Reset() { + *x = EnumValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValue) ProtoMessage() {} + +func (x *EnumValue) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead. +func (*EnumValue) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{1} +} + +func (x *EnumValue) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EnumValue) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// A list. +// +// Wrapped in a message so 'not set' and empty can be differentiated, which is +// required for use in a 'oneof'. +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ordered values in the list. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +// A map. +// +// Wrapped in a message so 'not set' and empty can be differentiated, which is +// required for use in a 'oneof'. +type MapValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of map entries. + // + // CEL has fewer restrictions on keys, so a protobuf map represenation + // cannot be used. + Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *MapValue) Reset() { + *x = MapValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue) ProtoMessage() {} + +func (x *MapValue) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue.ProtoReflect.Descriptor instead. +func (*MapValue) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3} +} + +func (x *MapValue) GetEntries() []*MapValue_Entry { + if x != nil { + return x.Entries + } + return nil +} + +// An entry in the map. +type MapValue_Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The key. + // + // Must be unique with in the map. + // Currently only boolean, int, uint, and string values can be keys. + Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The value. + Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MapValue_Entry) Reset() { + *x = MapValue_Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapValue_Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapValue_Entry) ProtoMessage() {} + +func (x *MapValue_Entry) ProtoReflect() protoreflect.Message { + mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead. 
+func (*MapValue_Entry) Descriptor() ([]byte, []int) { + return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *MapValue_Entry) GetKey() *Value { + if x != nil { + return x.Key + } + return nil +} + +func (x *MapValue_Entry) GetValue() *Value { + if x != nil { + return x.Value + } + return nil +} + +var File_google_api_expr_v1alpha1_value_proto protoreflect.FileDescriptor + +var file_google_api_expr_v1alpha1_value_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, + 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x04, 0x0a, 0x05, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69, + 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48, + 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, + 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, + 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75, + 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x44, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x37, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x71, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x31, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, + 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, + 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_expr_v1alpha1_value_proto_rawDescOnce sync.Once + file_google_api_expr_v1alpha1_value_proto_rawDescData = file_google_api_expr_v1alpha1_value_proto_rawDesc +) + +func file_google_api_expr_v1alpha1_value_proto_rawDescGZIP() []byte { + file_google_api_expr_v1alpha1_value_proto_rawDescOnce.Do(func() { + file_google_api_expr_v1alpha1_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_value_proto_rawDescData) + }) + return file_google_api_expr_v1alpha1_value_proto_rawDescData +} + +var file_google_api_expr_v1alpha1_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_api_expr_v1alpha1_value_proto_goTypes = []interface{}{ + (*Value)(nil), // 0: google.api.expr.v1alpha1.Value + (*EnumValue)(nil), // 1: google.api.expr.v1alpha1.EnumValue + (*ListValue)(nil), // 2: google.api.expr.v1alpha1.ListValue + (*MapValue)(nil), // 3: google.api.expr.v1alpha1.MapValue + (*MapValue_Entry)(nil), // 4: google.api.expr.v1alpha1.MapValue.Entry + (structpb.NullValue)(0), // 5: google.protobuf.NullValue + (*anypb.Any)(nil), // 6: google.protobuf.Any +} +var file_google_api_expr_v1alpha1_value_proto_depIdxs = []int32{ + 5, // 0: google.api.expr.v1alpha1.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 1: google.api.expr.v1alpha1.Value.enum_value:type_name -> google.api.expr.v1alpha1.EnumValue + 6, // 2: google.api.expr.v1alpha1.Value.object_value:type_name -> google.protobuf.Any + 3, // 3: google.api.expr.v1alpha1.Value.map_value:type_name -> google.api.expr.v1alpha1.MapValue + 2, // 4: google.api.expr.v1alpha1.Value.list_value:type_name -> google.api.expr.v1alpha1.ListValue + 0, // 5: google.api.expr.v1alpha1.ListValue.values:type_name -> google.api.expr.v1alpha1.Value + 4, // 6: google.api.expr.v1alpha1.MapValue.entries:type_name -> google.api.expr.v1alpha1.MapValue.Entry + 0, // 7: google.api.expr.v1alpha1.MapValue.Entry.key:type_name -> google.api.expr.v1alpha1.Value + 0, // 8: google.api.expr.v1alpha1.MapValue.Entry.value:type_name -> google.api.expr.v1alpha1.Value + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_google_api_expr_v1alpha1_value_proto_init() } +func file_google_api_expr_v1alpha1_value_proto_init() { + if File_google_api_expr_v1alpha1_value_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_expr_v1alpha1_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_api_expr_v1alpha1_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_expr_v1alpha1_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapValue_Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_expr_v1alpha1_value_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_BoolValue)(nil), + (*Value_Int64Value)(nil), + (*Value_Uint64Value)(nil), + (*Value_DoubleValue)(nil), + (*Value_StringValue)(nil), + (*Value_BytesValue)(nil), + (*Value_EnumValue)(nil), + (*Value_ObjectValue)(nil), + (*Value_MapValue)(nil), + (*Value_ListValue)(nil), + (*Value_TypeValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_expr_v1alpha1_value_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_expr_v1alpha1_value_proto_goTypes, + DependencyIndexes: file_google_api_expr_v1alpha1_value_proto_depIdxs, + MessageInfos: file_google_api_expr_v1alpha1_value_proto_msgTypes, + }.Build() + File_google_api_expr_v1alpha1_value_proto = out.File + file_google_api_expr_v1alpha1_value_proto_rawDesc = nil + file_google_api_expr_v1alpha1_value_proto_goTypes = nil + file_google_api_expr_v1alpha1_value_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go new file mode 100644 index 00000000..e7d3805e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -0,0 +1,235 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/api/httpbody.proto + +package httpbody + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Message that represents an arbitrary HTTP body. It should only be used for +// payload formats that can't be represented as JSON, such as raw binary or +// an HTML page. +// +// This message can be used both in streaming and non-streaming API methods in +// the request as well as the response. +// +// It can be used as a top-level request field, which is convenient if one +// wants to extract parameters from either the URL or HTTP template into the +// request fields and also want access to the raw HTTP body. +// +// Example: +// +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; +// +// // The raw HTTP body is bound to this field. +// google.api.HttpBody http_body = 2; +// +// } +// +// service ResourceService { +// rpc GetResource(GetResourceRequest) +// returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) +// returns (google.protobuf.Empty); +// +// } +// +// Example with streaming methods: +// +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// +// } +// +// Use of this type only changes how the request and response bodies are +// handled, all other features will continue to work unchanged. +type HttpBody struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP Content-Type header value specifying the content type of the body. + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The HTTP request/response body as raw binary. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Application specific response metadata. Must be set in the first response + // for streaming APIs. + Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *HttpBody) Reset() { + *x = HttpBody{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_httpbody_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpBody) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpBody) ProtoMessage() {} + +func (x *HttpBody) ProtoReflect() protoreflect.Message { + mi := &file_google_api_httpbody_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead. 
+func (*HttpBody) Descriptor() ([]byte, []int) { + return file_google_api_httpbody_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpBody) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *HttpBody) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *HttpBody) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +var File_google_api_httpbody_proto protoreflect.FileDescriptor + +var file_google_api_httpbody_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, + 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_httpbody_proto_rawDescOnce sync.Once + file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc +) + +func file_google_api_httpbody_proto_rawDescGZIP() []byte { + file_google_api_httpbody_proto_rawDescOnce.Do(func() { + file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData) + }) + return file_google_api_httpbody_proto_rawDescData +} + +var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_httpbody_proto_goTypes = []interface{}{ + (*HttpBody)(nil), // 0: google.api.HttpBody + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_api_httpbody_proto_depIdxs = []int32{ + 1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_httpbody_proto_init() } +func 
file_google_api_httpbody_proto_init() { + if File_google_api_httpbody_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpBody); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_httpbody_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_httpbody_proto_goTypes, + DependencyIndexes: file_google_api_httpbody_proto_depIdxs, + MessageInfos: file_google_api_httpbody_proto_msgTypes, + }.Build() + File_google_api_httpbody_proto = out.File + file_google_api_httpbody_proto_rawDesc = nil + file_google_api_httpbody_proto_goTypes = nil + file_google_api_httpbody_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go new file mode 100644 index 00000000..3e562182 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -0,0 +1,1314 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v4.24.4 +// source: google/rpc/error_details.proto + +package errdetails + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. 
This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. 
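Since the comment above prescribes client behavior, a hedged sketch of how a Go client might honor `retry_delay` may help; the helper name and `fallback` parameter are assumptions for illustration, and per the guidance a real client would still grow the fallback exponentially between attempts:

```go
package retrydemo

import (
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

// backoffFor returns the server-suggested delay from an error's RetryInfo
// detail, or the caller's own backoff value when none is attached.
func backoffFor(err error, fallback time.Duration) time.Duration {
	st, ok := status.FromError(err)
	if !ok || st == nil {
		return fallback
	}
	for _, d := range st.Details() {
		if ri, ok := d.(*errdetails.RetryInfo); ok && ri.GetRetryDelay() != nil {
			return ri.GetRetryDelay().AsDuration()
		}
	}
	return fallback
}
```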
+type RetryInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clients should wait at least this long between retrying the same request. + RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` +} + +func (x *RetryInfo) Reset() { + *x = RetryInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryInfo) ProtoMessage() {} + +func (x *RetryInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. +func (*RetryInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} +} + +func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { + if x != nil { + return x.RetryDelay + } + return nil +} + +// Describes additional debugging info. +type DebugInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` +} + +func (x *DebugInfo) Reset() { + *x = DebugInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DebugInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DebugInfo) ProtoMessage() {} + +func (x *DebugInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. +func (*DebugInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} +} + +func (x *DebugInfo) GetStackEntries() []string { + if x != nil { + return x.StackEntries + } + return nil +} + +func (x *DebugInfo) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryInfo and Help types for other details about handling a +// quota failure. 
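On the server side, a QuotaFailure like the daily-limit example above is typically attached to a `RESOURCE_EXHAUSTED` status. A minimal sketch, with illustrative subject and description strings echoing the documented example:

```go
package quotademo

import (
	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// quotaExceeded builds a RESOURCE_EXHAUSTED error carrying a QuotaFailure
// detail describing which subject hit which limit.
func quotaExceeded(project string) error {
	st := status.New(codes.ResourceExhausted, "quota exceeded")
	detailed, err := st.WithDetails(&errdetails.QuotaFailure{
		Violations: []*errdetails.QuotaFailure_Violation{{
			Subject:     "project:" + project,
			Description: "Daily Limit for read operations exceeded",
		}},
	})
	if err != nil {
		return st.Err() // could not attach details; return the bare status
	}
	return detailed.Err()
}
```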
+type QuotaFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all quota violations. + Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *QuotaFailure) Reset() { + *x = QuotaFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure) ProtoMessage() {} + +func (x *QuotaFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. +func (*QuotaFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} +} + +func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes what preconditions have failed. +// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *PreconditionFailure) Reset() { + *x = PreconditionFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure) ProtoMessage() {} + +func (x *PreconditionFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead. +func (*PreconditionFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4} +} + +func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all violations in a client request. 
+ FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` +} + +func (x *BadRequest) Reset() { + *x = BadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest) ProtoMessage() {} + +func (x *BadRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead. +func (*BadRequest) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5} +} + +func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if x != nil { + return x.FieldViolations + } + return nil +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. +type RequestInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` +} + +func (x *RequestInfo) Reset() { + *x = RequestInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestInfo) ProtoMessage() {} + +func (x *RequestInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead. +func (*RequestInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6} +} + +func (x *RequestInfo) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *RequestInfo) GetServingData() string { + if x != nil { + return x.ServingData + } + return "" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". 
+ ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:" or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *ResourceInfo) Reset() { + *x = ResourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInfo) ProtoMessage() {} + +func (x *ResourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead. +func (*ResourceInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceInfo) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceInfo) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *ResourceInfo) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *ResourceInfo) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URL(s) pointing to additional information on handling the current error. 
+ Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *Help) Reset() { + *x = Help{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help) ProtoMessage() {} + +func (x *Help) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help.ProtoReflect.Descriptor instead. +func (*Help) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8} +} + +func (x *Help) GetLinks() []*Help_Link { + if x != nil { + return x.Links + } + return nil +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The locale used following the specification defined at + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *LocalizedMessage) Reset() { + *x = LocalizedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalizedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalizedMessage) ProtoMessage() {} + +func (x *LocalizedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead. +func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9} +} + +func (x *LocalizedMessage) GetLocale() string { + if x != nil { + return x.Locale + } + return "" +} + +func (x *LocalizedMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. 
+ // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *QuotaFailure_Violation) Reset() { + *x = QuotaFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure_Violation) ProtoMessage() {} + +func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead. +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *QuotaFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *QuotaFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single precondition failure. +type PreconditionFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation subjects. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *PreconditionFailure_Violation) Reset() { + *x = PreconditionFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure_Violation) ProtoMessage() {} + +func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead. 
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PreconditionFailure_Violation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PreconditionFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *PreconditionFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A path that leads to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. + // + // Consider the following: + // + // message CreateContactRequest { + // message EmailAddress { + // enum Type { + // TYPE_UNSPECIFIED = 0; + // HOME = 1; + // WORK = 2; + // } + // + // optional string email = 1; + // repeated EmailType type = 2; + // } + // + // string full_name = 1; + // repeated EmailAddress email_addresses = 2; + // } + // + // In this example, in proto `field` could take one of the following values: + // + // - `full_name` for a violation in the `full_name` value + // - `email_addresses[1].email` for a violation in the `email` field of the + // first `email_addresses` message + // - `email_addresses[3].type[2]` for a violation in the second `type` + // value in the third `email_addresses` message. + // + // In JSON, the same values are represented as: + // + // - `fullName` for a violation in the `fullName` value + // - `emailAddresses[1].email` for a violation in the `email` field of the + // first `emailAddresses` message + // - `emailAddresses[3].type[2]` for a violation in the second `type` + // value in the third `emailAddresses` message. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *BadRequest_FieldViolation) Reset() { + *x = BadRequest_FieldViolation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest_FieldViolation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest_FieldViolation) ProtoMessage() {} + +func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead. +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *BadRequest_FieldViolation) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *BadRequest_FieldViolation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Describes a URL link. 
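Pulling these detail types back out on the client is a type switch over `status.Details()`. A hedged sketch covering the BadRequest and Help messages defined here; the function name is an assumption, and the error is presumed to come from some RPC call elsewhere:

```go
package detailsdemo

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

// logDetails prints any BadRequest field violations and Help links
// attached to a gRPC error.
func logDetails(err error) {
	st, ok := status.FromError(err)
	if !ok || st == nil {
		return
	}
	for _, d := range st.Details() {
		switch d := d.(type) {
		case *errdetails.BadRequest:
			for _, fv := range d.GetFieldViolations() {
				fmt.Printf("bad field %q: %s\n", fv.GetField(), fv.GetDescription())
			}
		case *errdetails.Help:
			for _, l := range d.GetLinks() {
				fmt.Printf("see %s (%s)\n", l.GetUrl(), l.GetDescription())
			}
		}
	}
}
```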
+type Help_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *Help_Link) Reset() { + *x = Help_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help_Link) ProtoMessage() {} + +func (x *Help_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead. +func (*Help_Link) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Help_Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Help_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +var File_google_rpc_error_details_proto protoreflect.FileDescriptor + +var file_google_rpc_error_details_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a, + 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, + 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, + 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, + 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, + 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_error_details_proto_rawDescOnce sync.Once + file_google_rpc_error_details_proto_rawDescData = 
file_google_rpc_error_details_proto_rawDesc +) + +func file_google_rpc_error_details_proto_rawDescGZIP() []byte { + file_google_rpc_error_details_proto_rawDescOnce.Do(func() { + file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData) + }) + return file_google_rpc_error_details_proto_rawDescData +} + +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_goTypes = []interface{}{ + (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo + (*RetryInfo)(nil), // 1: google.rpc.RetryInfo + (*DebugInfo)(nil), // 2: google.rpc.DebugInfo + (*QuotaFailure)(nil), // 3: google.rpc.QuotaFailure + (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure + (*BadRequest)(nil), // 5: google.rpc.BadRequest + (*RequestInfo)(nil), // 6: google.rpc.RequestInfo + (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo + (*Help)(nil), // 8: google.rpc.Help + (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage + nil, // 10: google.rpc.ErrorInfo.MetadataEntry + (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation + (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 14: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration +} +var file_google_rpc_error_details_proto_depIdxs = []int32{ + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation + 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_rpc_error_details_proto_init() } +func file_google_rpc_error_details_proto_init() { + if File_google_rpc_error_details_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebugInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalizedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest_FieldViolation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_error_details_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_error_details_proto_goTypes, + DependencyIndexes: file_google_rpc_error_details_proto_depIdxs, + MessageInfos: file_google_rpc_error_details_proto_msgTypes, + }.Build() + File_google_rpc_error_details_proto = out.File + file_google_rpc_error_details_proto_rawDesc = nil + file_google_rpc_error_details_proto_goTypes = nil + file_google_rpc_error_details_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new 
file mode 100644 index 00000000..9d4213eb --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 00000000..0854d298 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,73 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. + +If you are new to GitHub, please start by reading the [Pull Request howto](https://help.github.com/articles/about-pull-requests/). + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single + concern**. We often receive PRs that try to fix several things at a time; when + only one fix is considered acceptable, nothing gets merged, and both the + author's and the reviewers' time is wasted. Create separate PRs to address + different concerns and everyone will be happy. + +- If you are searching for features to work on, issues labeled [Status: Help + Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22) + are a great place to start. These issues are well documented and usually can be + resolved with a single pull request. + +- If you are adding a new file, make sure it has the copyright message template + at the top as a comment. You can copy over the message from an existing file + and update the year. + +- The grpc package should only depend on standard Go packages and a small number + of exceptions. If your contribution introduces new dependencies which are NOT + in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a + discussion with gRPC-Go authors and consultants. + +- For speculative changes, consider opening an issue and discussing it first. If + you are suggesting a behavioral or API change, consider starting with a [gRFC + proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made + and **why** it was made. Link to a GitHub issue if it exists. + +- If you want to fix formatting or style, consider whether your changes are an + obvious improvement or might be considered a personal preference. If a style + change is based on preference, it likely will not be accepted. If it corrects + widely agreed-upon anti-patterns, then please do create a PR and explain the + benefits of the change. + +- Unless your PR is trivial, you should expect there will be reviewer comments + that you'll need to address before merging. We'll mark it as `Status: Requires + Reporter Clarification` if we expect you to respond to these comments in a + timely manner. If the PR remains inactive for 6 days, it will be marked as + `stale` and automatically closed 7 days after that if we don't hear back from + you.
+ +- Maintain **clean commit history** and use **meaningful commit messages**. PRs + with messy commit history are difficult to review and won't be merged. Use + `rebase -i upstream/master` to curate your commit history and/or to bring in + latest changes from master (but avoid rebasing in the middle of a code + review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we + can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We + recommend you **run tests locally** before creating your PR to catch breakages + early on. + - `./scripts/vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 00000000..d6ff2674 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 00000000..5d4096d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,36 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) + +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC +- [atollena](https://github.com/atollena), Datadog, Inc. +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 00000000..be38384f --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,49 @@ +all: vet test testrace + +build: + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: + @ if ! 
which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps + ./scripts/vet.sh + +vetdeps: + ./scripts/vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + deps \ + proto \ + test \ + testsubmodule \ + testrace \ + testdeps \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 00000000..b572707c --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,107 @@ +# gRPC-Go + +[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] +[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) + +The [Go][] implementation of [gRPC][]: A high performance, open source, general +RPC framework that puts mobile and HTTP/2 first. For more information see the +[Go gRPC docs][], or jump directly into the [quick start][]. + +## Prerequisites + +- **[Go][]**: any one of the **two latest major** [releases][go-releases]. + +## Installation + +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + + +```go +import "google.golang.org/grpc" +``` + +> **Note:** If you are trying to access `grpc-go` from **China**, see the +> [FAQ](#FAQ) below. + +## Learn more + +- [Go gRPC docs][], which include a [quick start][] and [API + reference][API] among other resources +- [Low-level technical docs](Documentation) from this repository +- [Performance benchmark][] +- [Examples](examples) + +## FAQ + +### I/O Timeout Errors + +The `golang.org` domain may be blocked from some countries. `go get` usually +produces an error like the following when this happens: + +```console +$ go get -u google.golang.org/grpc +package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout) +``` + +To build Go code, there are several options: + +- Set up a VPN and access google.golang.org through that. + +- With Go module support: it is possible to use the `replace` feature of `go + mod` to create aliases for golang.org packages. In your project's directory: + + ```sh + go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest + go mod tidy + go mod vendor + go build -mod=vendor + ``` + + Again, this will need to be done for all transitive dependencies hosted on + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). + +### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. + +### How to turn on logging + +The default logger is controlled by environment variables. 
Turn everything on +like this: + +```console +$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99 +$ export GRPC_GO_LOG_SEVERITY_LEVEL=info +``` + +### The RPC failed with error `"code = Unavailable desc = transport is closing"` + +This error means the connection the RPC is using was closed, and there are many +possible reasons, including: + 1. mis-configured transport credentials, connection failed on handshaking + 1. bytes disrupted, possibly by a proxy in between + 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. + +It can be tricky to debug this because the error happens on the client side but +the root cause of the connection being closed is on the server side. Turn on +logging on __both client and server__, and see if there are any transport +errors. + +[API]: https://pkg.go.dev/google.golang.org/grpc +[Go]: https://golang.org +[Go module]: https://github.com/golang/go/wiki/Modules +[gRPC]: https://grpc.io +[Go gRPC docs]: https://grpc.io/docs/languages/go +[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608 +[quick start]: https://grpc.io/docs/languages/go/quickstart +[go-releases]: https://golang.org/doc/devel/release.html diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md new file mode 100644 index 00000000..abab2793 --- /dev/null +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -0,0 +1,3 @@ +# Security Policy + +For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 00000000..52d530d7 --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package attributes + +import ( + "fmt" + "strings" +) + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. Values should not be modified after they are added to an +// Attributes or if they were received from one. 
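[Editor's illustration — not part of the vendored tree] The "transport is closing" FAQ entry above points at server keepalive as a common culprit. Below is a minimal sketch of the server-side knobs that entry names, assuming connection recycling via `MaxConnectionAge` is the cause; the durations are placeholders, not recommendations:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Recycle connections periodically (e.g. to force DNS re-resolution),
	// but give in-flight RPCs a grace period so they are not cut off with
	// "transport is closing".
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      30 * time.Minute,
		MaxConnectionAgeGrace: 5 * time.Minute,
	}))
	defer srv.Stop()
	// ... register services and call srv.Serve(lis) as usual.
}
```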
If values implement 'Equal(o +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. +type Attributes struct { + m map[any]any +} + +// New returns a new Attributes containing the key/value pair. +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} +} + +// WithValue returns a new Attributes containing the previous keys and values +// and the new key/value pair. If the same key appears multiple times, the +// last value overwrites all previous values for that key. To remove an +// existing key, use a nil value. value should not be modified later. +func (a *Attributes) WithValue(key, value any) *Attributes { + if a == nil { + return New(key, value) + } + n := &Attributes{m: make(map[any]any, len(a.m)+1)} + for k, v := range a.m { + n.m[k] = v + } + n.m[key] = value + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. The returned value should not be modified. +func (a *Attributes) Value(key any) any { + if a == nil { + return nil + } + return a.m[key] +} + +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. +func (a *Attributes) Equal(o *Attributes) bool { + if a == nil && o == nil { + return true + } + if a == nil || o == nil { + return false + } + if len(a.m) != len(o.m) { + return false + } + for k, v := range a.m { + ov, ok := o.m[k] + if !ok { + // o missing element of a + return false + } + if eq, ok := v.(interface{ Equal(o any) bool }); ok { + if !eq.Equal(ov) { + return false + } + } else if v != ov { + // Fallback to a standard equality check if Value is unimplemented. + return false + } + } + return true +} + +// String prints the attribute map. If any key or values throughout the map +// implement fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() +} + +func str(x any) (s string) { + if v, ok := x.(fmt.Stringer); ok { + return fmt.Sprint(v) + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// Is it impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000..29475e31 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2017 gRPC authors. 
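[Editor's illustration — not part of the vendored tree] A minimal sketch of the `attributes` package just vendored above: `New` and `WithValue` never mutate, each call returns a fresh `Attributes`. The key type `pathKey` is hypothetical, standing in for the user-defined key types the package doc calls for:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Users define their own unexported key types so keys cannot collide
// across packages.
type pathKey struct{}

func main() {
	a := attributes.New(pathKey{}, "/users")
	b := a.WithValue(pathKey{}, "/orders") // overwrites the value under pathKey in the copy

	fmt.Println(a.Value(pathKey{})) // /users  (a itself is unchanged)
	fmt.Println(b.Value(pathKey{})) // /orders
	fmt.Println(a.Equal(b))         // false: plain string values compare with ==
}
```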
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// See internal/backoff package for the backoff implementation. This file is +// kept for the exported types and API backward compatibility. + +package grpc + +import ( + "time" + + "google.golang.org/grpc/backoff" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 00000000..d7b40b7c --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. 
+ Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specified +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000..b181f386 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,464 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "context" + "encoding/json" + "errors" + "net" + "strings" + + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) + + logger = grpclog.Component("balancer") +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. If the +// Builder implements ConfigParser, ParseConfig will be called when new service +// configs are received by the resolver, and the result will be provided to the +// Balancer in UpdateClientConnState. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + name := strings.ToLower(b.Name()) + if name != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } + m[name] = b +} + +// unregisterForTesting deletes the balancer with the given name from the +// balancer map. +// +// This function is not thread-safe. +func unregisterForTesting(name string) { + delete(m, name) +} + +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. 
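[Editor's illustration — not part of the vendored tree] Tying the two backoff files above together: a sketch of configuring a client with `ConnectParams`, starting from `backoff.DefaultConfig` and overriding a single field. The target address is a placeholder, and `grpc.NewClient` is assumed to be the dial entry point in this vendored version:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from the documented defaults (1s base, 1.6x multiplier, 0.2
	// jitter, 120s cap) and tighten only the cap.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```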
+func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + +func init() { + internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress +} + +// Get returns the resolver builder registered with the given name. +// Note that the compare is done in a case-insensitive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// A SubConn represents a single connection to a gRPC backend service. +// +// Each SubConn contains a list of addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger the +// connecting, Balancers must call Connect. If a connection re-enters IDLE, +// Balancers must call Connect again to trigger a new connection attempt. +// +// gRPC will try to connect to the addresses in sequence, and stop trying the +// remainder once the first connection is successful. If an attempt to connect +// to all addresses encounters an error, the SubConn will enter +// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE. +// +// Once established, if a connection is lost, the SubConn will transition +// directly to IDLE. +// +// This interface is to be implemented by gRPC. Users should not need their own +// implementation of this interface. For situations like testing, any +// implementations should embed this interface. This allows gRPC to add new +// methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. 
+ Shutdown() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct { + // CredsBundle is the credentials bundle that will be used in the created + // SubConn. If it's nil, the original creds from grpc DialOptions will be + // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. + CredsBundle credentials.Bundle + // HealthCheckEnabled indicates whether health check service should be + // enabled on this SubConn + HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) +} + +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. + Picker Picker +} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. 
+ DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + CredsBundle credentials.Bundle + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParent is the parent ClientConn's channelz channel. + ChannelzParent channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. + Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. +type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v3.LoadReport. + ServerLoad any +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). 
+ ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. 
If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. + Close() +} + +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. + connectedAddress resolver.Address +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. +type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. 
+type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 00000000..2b87bd79 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,264 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + state: connectivity.Connecting, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *balancer.ConnectivityStateEvaluator + state connectivity.State + + subConns *resolver.AddressMap + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker + config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure +} + +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if b.subConns.Len() == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) +} + +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // TODO: handle s.ResolverState.ServiceConfig? + if logger.V(2) { + logger.Info("base.baseBalancer: got new ClientConn state: ", s) + } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. + addrsSet := resolver.NewAddressMap() + for _, a := range s.ResolverState.Addresses { + addrsSet.Set(a, nil) + if _, ok := b.subConns.Get(a); !ok { + // a is a new address (not existing in b.subConns). 
+ var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) + if err != nil { + logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns.Set(a, sc) + b.scStates[sc] = connectivity.Idle + b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle) + sc.Connect() + } + } + for _, a := range b.subConns.Keys() { + sci, _ := b.subConns.Get(a) + sc := sci.(balancer.SubConn) + // a was removed by resolver. + if _, ok := addrsSet.Get(a); !ok { + sc.Shutdown() + b.subConns.Delete(a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in updateSubConnState. + } + } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
+func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call Shutdown for the SubConns. +func (b *baseBalancer) Close() { +} + +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 00000000..e31d76e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 00000000..c3341358 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. 
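[Editor's illustration — not part of the vendored tree] The `base` package above exists so that a custom policy only has to supply a `PickerBuilder`; the base balancer handles SubConn lifecycle and state aggregation. A sketch under that assumption — the policy name `"first_ready"` and every identifier here are made up for illustration:

```go
package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// Name is what a service config uses to select this policy, e.g.
// {"loadBalancingConfig": [{"first_ready": {}}]}.
const Name = "first_ready"

func init() {
	// Register must only be called from init(); see balancer.Register above.
	balancer.Register(base.NewBalancerBuilder(Name, &pickerBuilder{}, base.Config{HealthCheck: true}))
}

type pickerBuilder struct{}

// Build is invoked by the base balancer with the current set of READY
// SubConns whenever connectivity changes.
func (*pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	for sc := range info.ReadySCs {
		// Pin every RPC to an arbitrary ready SubConn (map order is random).
		return &fixedPicker{sc: sc}
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

type fixedPicker struct{ sc balancer.SubConn }

func (p *fixedPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}
```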
+// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 00000000..4ecfa1c2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValue(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. 
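[Editor's illustration — not part of the vendored tree] A worked sequence through the `ConnectivityStateEvaluator` above, showing the Ready > Connecting > Idle > TransientFailure precedence its doc comment describes:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// Two new SubConns appear; by convention their first recorded
	// transition is Shutdown -> Idle (Shutdown itself is never counted).
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)

	// One starts connecting; with no Ready SubConn, Connecting wins.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting)) // CONNECTING

	// The other fails. Connecting still outranks TransientFailure, so the
	// aggregate stays Connecting until that attempt also resolves.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.TransientFailure)) // CONNECTING

	// The connecting SubConn becomes Ready: Ready dominates everything.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY
}
```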
+func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go new file mode 100644 index 00000000..4d69b405 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -0,0 +1,283 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pickfirst contains the pick_first load balancing policy. +package pickfirst + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +func init() { + balancer.Register(pickfirstBuilder{}) + internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } +} + +var logger = grpclog.Component("pick-first-lb") + +const ( + // Name is the name of the pick_first balancer. + Name = "pick_first" + logPrefix = "[pick-first-lb %p] " +) + +type pickfirstBuilder struct{} + +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (pickfirstBuilder) Name() string { + return Name +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of endpoints received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. 
+ return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +type Shuffler interface { + ShuffleAddressListForTesting(n int, swap func(i, j int)) +} + +func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) } + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + var addrs []resolver.Address + if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 { + // Perform the optional shuffling described in gRFC A62. The shuffling will + // change the order of endpoints but not touch the order of the addresses + // within each endpoint. - A61 + if cfg.ShuffleAddressList { + endpoints = append([]resolver.Endpoint{}, endpoints...) + internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] }) + } + + // "Flatten the list by concatenating the ordered list of addresses for each + // of the endpoints, in order." - A61 + for _, endpoint := range endpoints { + // "In the flattened list, interleave addresses from the two address + // families, as per RFC-8304 section 4." - A61 + // TODO: support the above language. + addrs = append(addrs, endpoint.Addresses...) + } + } else { + // Endpoints not set, process addresses until we migrate resolver + // emissions fully to Endpoints. The top channel does wrap emitted + // addresses with endpoints, however some balancers such as weighted + // target do not forward the corresponding correct endpoints down/split + // endpoints properly. Once all balancers correctly forward endpoints + // down, can delete this else conditional. + addrs = state.ResolverState.Addresses + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) 
+ rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + } + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) + if err != nil { + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + } + if b.subConn != subConn { + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") + } + return + } + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil + return + } + + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) + case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, + }) + } + b.state = state.ConnectivityState +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. 
+type idlePicker struct {
+	subConn balancer.SubConn
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.subConn.Connect()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
new file mode 100644
index 00000000..260255d3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package roundrobin defines a roundrobin balancer. The roundrobin balancer
+// is installed as one of the default balancers in gRPC, so users don't need
+// to explicitly install this balancer.
+package roundrobin
+
+import (
+	"math/rand"
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/grpclog"
+)
+
+// Name is the name of round_robin balancer.
+const Name = "round_robin"
+
+var logger = grpclog.Component("roundrobin")
+
+// newBuilder creates a new roundrobin balancer builder.
+func newBuilder() balancer.Builder {
+	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
+}
+
+func init() {
+	balancer.Register(newBuilder())
+}
+
+type rrPickerBuilder struct{}
+
+func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
+	logger.Infof("roundrobinPicker: Build called with info: %v", info)
+	if len(info.ReadySCs) == 0 {
+		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
+	}
+	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
+	for sc := range info.ReadySCs {
+		scs = append(scs, sc)
+	}
+	return &rrPicker{
+		subConns: scs,
+		// Start at a random index, as the same RR balancer rebuilds a new
+		// picker when SubConn states change, and we don't want to apply excess
+		// load to the first server in the list.
+		next: uint32(rand.Intn(len(scs))),
+	}
+}
+
+type rrPicker struct {
+	// subConns is the snapshot of the roundrobin balancer when this picker was
+	// created. The slice is immutable. Each Pick() will do a round robin
+	// selection from it and return the selected SubConn.
+	subConns []balancer.SubConn
+	next     uint32
+}
+
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	subConnsLen := uint32(len(p.subConns))
+	nextIndex := atomic.AddUint32(&p.next, 1)
+
+	sc := p.subConns[nextIndex%subConnsLen]
+	return balancer.PickResult{SubConn: sc}, nil
+}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
new file mode 100644
index 00000000..8ad6ce2f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -0,0 +1,365 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/resolver" +) + +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + // The following fields are only accessed within the serializer or during + // initialization. + curBalancerName string + balancer *gracefulswitch.Balancer + + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool +} + +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the updateClientConnState() method +// is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParent: cc.channelz, + Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, + }, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. 
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	errCh := make(chan error)
+	uccs := func(ctx context.Context) {
+		defer close(errCh)
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		name := gracefulswitch.ChildName(ccs.BalancerConfig)
+		if ccb.curBalancerName != name {
+			ccb.curBalancerName = name
+			channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
+		}
+		err := ccb.balancer.UpdateClientConnState(*ccs)
+		if logger.V(2) && err != nil {
+			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+		}
+		errCh <- err
+	}
+	onFailure := func() { close(errCh) }
+
+	// UpdateClientConnState can race with Close, and when the latter wins, the
+	// serializer is closed, and the attempt to schedule the callback will fail.
+	// It is acceptable to ignore this failure. But since we want to handle the
+	// state update in a blocking fashion (when we successfully schedule the
+	// callback), we have to use the ScheduleOr method and not the MaybeSchedule
+	// method on the serializer.
+	ccb.serializer.ScheduleOr(uccs, onFailure)
+	return <-errCh
+}
+
+// resolverError is invoked by grpc to push a resolver error to the underlying
+// balancer. The call to the balancer is executed from the serializer.
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ResolverError(err)
+	})
+}
+
+// close initiates async shutdown of the wrapper. cc.mu must be held when
+// calling this function. To determine whether the wrapper has finished shutting
+// down, the caller should block on ccb.serializer.Done() without cc.mu held.
+func (ccb *ccBalancerWrapper) close() {
+	ccb.mu.Lock()
+	ccb.closed = true
+	ccb.mu.Unlock()
+	channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
+	ccb.serializer.TrySchedule(func(context.Context) {
+		if ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.Close()
+		ccb.balancer = nil
+	})
+	ccb.serializerCancel()
+}
+
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ExitIdle()
+	})
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
+	}
+	ccb.mu.Unlock()
+
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
+	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{
+		ccb:           ccb,
+		ac:            ac,
+		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
+		stateListener: opts.StateListener,
+	}
+	ac.acbw = acbw
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// The graceful switch balancer will never call this.
+ logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") +} + +func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + acbw.UpdateAddresses(addrs) +} + +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + if ccb.cc.conns == nil { + // The CC has been closed; ignore this update. + return + } + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + // Update picker before updating state. Even though the ordering here does + // not matter, it can lead to multiple calls of Pick in the common start-up + // case where we wait for ready and then perform an RPC. If the picker is + // updated later, we could call the "connecting" picker when the state is + // updated, and then call the "ready" picker after the picker gets updated. + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. + ccb.cc.pickerWrapper.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. +type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + + mu sync.Mutex + producers map[balancer.ProducerBuilder]*refCountedProducer +} + +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } + }) +} + +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID) +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) +} + +func (acbw *acBalancerWrapper) Connect() { + go acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) Shutdown() { + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, blocks until it is or ctx expires. 
Returns an error when the context +// expires or the addrConn is shut down. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go new file mode 100644 index 00000000..55bffaa7 --- /dev/null +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -0,0 +1,1183 @@ +// Copyright 2018 The gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. +var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. 
+func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. +var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. +var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. 
+ // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. + SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. 
+func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
+	}
+	return 0
+}
+
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
+	}
+	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
+}
+
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
+	}
+	return GrpcLogEntry_LOGGER_UNKNOWN
+}
+
+func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
+		return x.ClientHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
+		return x.ServerHeader
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok {
+		return x.Message
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok {
+		return x.Trailer
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
+
+type isGrpcLogEntry_Payload interface {
+	isGrpcLogEntry_Payload()
+}
+
+type GrpcLogEntry_ClientHeader struct {
+	ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_ServerHeader struct {
+	ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
+}
+
+type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
+	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
+}
+
+type GrpcLogEntry_Trailer struct {
+	Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
+}
+
+func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
+
+func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
+
+type ClientHeader struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// This contains only the metadata from the application.
+	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// The name of the RPC method, which looks something like:
+	// /<service>/<method>
+	// Note the leading "/" character.
+	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+	// A single process may be used to run multiple virtual
+	// servers with different identities.
+	// The authority is the name of such a server identity.
+	// It is typically a portion of the URI in the form of
+	// <host> or <host>:<port> .
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` + // the RPC timeout + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` +} + +func (x *ClientHeader) Reset() { + *x = ClientHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientHeader) ProtoMessage() {} + +func (x *ClientHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead. +func (*ClientHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ClientHeader) GetMethodName() string { + if x != nil { + return x.MethodName + } + return "" +} + +func (x *ClientHeader) GetAuthority() string { + if x != nil { + return x.Authority + } + return "" +} + +func (x *ClientHeader) GetTimeout() *durationpb.Duration { + if x != nil { + return x.Timeout + } + return nil +} + +type ServerHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *ServerHeader) Reset() { + *x = ServerHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerHeader) ProtoMessage() {} + +func (x *ServerHeader) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead. +func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. 
If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. 
To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. +func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. 
+func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. +func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 
0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 
0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 
0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 
0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*GrpcLogEntry); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 00000000..788c89c1 --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+)
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received. This is typically called by generated code.
+//
+// All errors returned by Invoke are compatible with the status package.
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
+ // allow interceptor to see all applicable call options, which means those
+ // configured as defaults from dial option as well as per-call options
+ opts = combine(cc.dopts.callOptions, opts)
+
+ if cc.dopts.unaryInt != nil {
+ return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
+ }
+ return invoke(ctx, method, args, reply, cc, opts...)
+}
+
+func combine(o1 []CallOption, o2 []CallOption) []CallOption {
+ // we don't use append because o1 could have extra capacity whose
+ // elements would be overwritten, which could cause inadvertent
+ // sharing (and race conditions) between concurrent calls
+ if len(o1) == 0 {
+ return o2
+ } else if len(o2) == 0 {
+ return o1
+ }
+ ret := make([]CallOption, len(o1)+len(o2))
+ copy(ret, o1)
+ copy(ret[len(o1):], o2)
+ return ret
+}
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received. This is typically called by generated code.
+//
+// DEPRECATED: Use ClientConn.Invoke instead.
+func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
+ return cc.Invoke(ctx, method, args, reply, opts...)
+}
+
+var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
+
+func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
+ cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
+ if err != nil {
+ return err
+ }
+ if err := cs.SendMsg(req); err != nil {
+ return err
+ }
+ return cs.RecvMsg(reply)
+}
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
new file mode 100644
index 00000000..32b7fa57
--- /dev/null
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz exports internals of the channelz implementation as required
+// by other gRPC packages.
+//
+// The implementation of the channelz spec, as defined in
+// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
+// the `internal/channelz` package.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are experimental and may be removed in a
+// later release.
+package channelz
+
+import "google.golang.org/grpc/internal/channelz"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 00000000..9c8850e3 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1827 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "math" + "net/url" + "slices" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/pickfirst" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" + "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. + _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") + // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default + // service config. + invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = pickfirst.Name +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. 
+ errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
+ // errTransportCredsAndBundle indicates that a creds bundle is used together
+ // with other individual Transport Credentials.
+ errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
+ // errNoTransportCredsInBundle indicates that the configured creds bundle
+ // returned nil transport credentials.
+ errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+ // errTransportCredentialsMissing indicates that users want to transmit
+ // security information (e.g., an OAuth2 token), which requires a secure
+ // connection, over an insecure connection.
+ errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
+)
+
+const (
+ defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
+ defaultClientMaxSendMessageSize = math.MaxInt32
+ // http2IOBufSize specifies the buffer size for sending frames.
+ defaultWriteBufSize = 32 * 1024
+ defaultReadBufSize = 32 * 1024
+)
+
+type defaultConfigSelector struct {
+ sc *ServiceConfig
+}
+
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+ return &iresolver.RPCConfig{
+ Context: rpcInfo.Context,
+ MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+ }, nil
+}
+
+// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
+// is performed. Use of the ClientConn for RPCs will automatically cause it to
+// connect. Connect may be used to manually create a connection, but for most
+// users this is unnecessary.
+//
+// The target name syntax is defined in
+// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use the dns
+// resolver, a "dns:///" prefix should be applied to the target.
+//
+// The DialOptions returned by WithBlock, WithTimeout,
+// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
+// function.
+func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
+ cc := &ClientConn{
+ target: target,
+ conns: make(map[*addrConn]struct{}),
+ dopts: defaultDialOptions(),
+ }
+
+ cc.retryThrottler.Store((*retryThrottler)(nil))
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+ cc.ctx, cc.cancel = context.WithCancel(context.Background())
+
+ // Apply dial options.
+ disableGlobalOpts := false
+ for _, opt := range opts {
+ if _, ok := opt.(*disableGlobalDialOptions); ok {
+ disableGlobalOpts = true
+ break
+ }
+ }
+
+ if !disableGlobalOpts {
+ for _, opt := range globalDialOptions {
+ opt.apply(&cc.dopts)
+ }
+ }
+
+ for _, opt := range opts {
+ opt.apply(&cc.dopts)
+ }
+
+ // Determine the resolver to use.
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+ return nil, err
+ }
+
+ for _, opt := range globalPerTargetDialOptions {
+ opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+ }
+
+ chainUnaryClientInterceptors(cc)
+ chainStreamClientInterceptors(cc)
+
+ if err := cc.validateTransportCredentials(); err != nil {
+ return nil, err
+ }
+
+ if cc.dopts.defaultServiceConfigRawJSON != nil {
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
+ if scpr.Err != nil {
+ return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
+ }
+ cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
+ }
+ cc.mkp = cc.dopts.copts.KeepaliveParams
+
+ if err = cc.initAuthority(); err != nil {
+ return nil, err
+ }
+
+ // Register ClientConn with channelz. Note that this is done only once
+ // channel creation can no longer fail.
+ cc.channelzRegistration(target)
+ channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+ channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
+ cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
+ cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+
+ cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
+ cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
+ cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
+ return cc, nil
+}
+
+// Dial calls DialContext(context.Background(), target, opts...).
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+ return DialContext(context.Background(), target, opts...)
+}
+
+// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is
+// used, it calls Connect and WaitForStateChange until either the context
+// expires or the state of the ClientConn is Ready.
+//
+// One subtle difference between NewClient and Dial/DialContext is that the
+// former uses "dns" as the default name resolver, while the latter two use
+// "passthrough" for backward compatibility. This distinction should not matter
+// to most users, but could matter to legacy users that specify a custom dialer
+// and expect it to receive the target string directly.
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+ // At the end of this method, we kick the channel out of idle, rather than
+ // waiting for the first RPC.
+ opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
+ cc, err := NewClient(target, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // We start the channel off in idle mode, but kick it out of idle now,
+ // instead of waiting for the first RPC. This is the legacy behavior of
+ // Dial.
+ defer func() {
+ if err != nil {
+ cc.Close()
+ }
+ }()
+
+ // This creates the name resolver, load balancer, etc.
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ return nil, err
+ }
+
+ // Return now for non-blocking dials.
+ if !cc.dopts.block {
+ return cc, nil
+ }
+
+ if cc.dopts.timeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout)
+ defer cancel()
+ }
+ defer func() {
+ select {
+ case <-ctx.Done():
+ switch {
+ case ctx.Err() == err:
+ conn = nil
+ case err == nil || !cc.dopts.returnLastError:
+ conn, err = nil, ctx.Err()
+ default:
+ conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+ }
+ default:
+ }
+ }()
+
+ // A blocking dial blocks until the ClientConn is ready.
+ for {
+ s := cc.GetState()
+ if s == connectivity.Idle {
+ cc.Connect()
+ }
+ if s == connectivity.Ready {
+ return cc, nil
+ } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+ if err = cc.connectionError(); err != nil {
+ terr, ok := err.(interface {
+ Temporary() bool
+ })
+ if ok && !terr.Temporary() {
+ return nil, err
+ }
+ }
+ }
+ if !cc.WaitForStateChange(ctx, s) {
+ // ctx timed out or was canceled.
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+ return nil, err
+ }
+ return nil, ctx.Err()
+ }
+ }
+}
+
+// addTraceEvent is a helper method to add a trace event on the channel. If the
+// channel is a nested one, the same event is also added on the parent channel.
+func (cc *ClientConn) addTraceEvent(msg string) {
+ ted := &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Channel %s", msg),
+ Severity: channelz.CtInfo,
+ }
+ if cc.dopts.channelzParent != nil {
+ ted.Parent = &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
+ Severity: channelz.CtInfo,
+ }
+ }
+ channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+}
+
+type idler ClientConn
+
+func (i *idler) EnterIdleMode() {
+ (*ClientConn)(i).enterIdleMode()
+}
+
+func (i *idler) ExitIdleMode() error {
+ return (*ClientConn)(i).exitIdleMode()
+}
+
+// exitIdleMode moves the channel out of idle mode by recreating the name
+// resolver and load balancer. This should never be called directly; use
+// cc.idlenessMgr.ExitIdleMode instead.
+func (cc *ClientConn) exitIdleMode() (err error) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return errConnClosing
+ }
+ cc.mu.Unlock()
+
+ // This needs to be called without cc.mu because this builds a new resolver
+ // which might update state or report an error inline, which would then need
+ // to acquire cc.mu.
+ if err := cc.resolverWrapper.start(); err != nil {
+ return err
+ }
+
+ cc.addTraceEvent("exiting idle mode")
+ return nil
+}
+
+// initIdleStateLocked initializes common state to how it should be while idle.
+func (cc *ClientConn) initIdleStateLocked() {
+ cc.resolverWrapper = newCCResolverWrapper(cc)
+ cc.balancerWrapper = newCCBalancerWrapper(cc)
+ cc.firstResolveEvent = grpcsync.NewEvent()
+ // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+ // of setting it to nil here, we recreate the map. This also means that we
+ // don't have to do this when exiting idle mode.
+ cc.conns = make(map[*addrConn]struct{})
+}
+
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
+// name resolver, load balancer, and any subchannels. This should never be
+// called directly; use cc.idlenessMgr.EnterIdleMode instead.
+func (cc *ClientConn) enterIdleMode() {
+ cc.mu.Lock()
+
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+
+ conns := cc.conns
+
+ rWrapper := cc.resolverWrapper
+ rWrapper.close()
+ cc.pickerWrapper.reset()
+ bWrapper := cc.balancerWrapper
+ bWrapper.close()
+ cc.csMgr.updateState(connectivity.Idle)
+ cc.addTraceEvent("entering idle mode")
+
+ cc.initIdleStateLocked()
+
+ cc.mu.Unlock()
+
+ // Block until the name resolver and LB policy are closed.
+ <-rWrapper.serializer.Done()
+ <-bWrapper.serializer.Done()
+
+ // Close all subchannels after the LB policy is closed.
+ for ac := range conns {
+ ac.tearDown(errConnIdling)
+ }
+}
+
+// validateTransportCredentials performs a series of checks on the configured
+// transport credentials. It returns a non-nil error if any of these conditions
+// are met:
+// - no transport creds and no creds bundle is configured
+// - both transport creds and creds bundle are configured
+// - creds bundle is configured, but it lacks transport credentials
+// - insecure transport creds configured alongside call creds that require
+// transport level security
+//
+// If none of the above conditions are met, the configured credentials are
+// deemed valid and a nil error is returned.
+func (cc *ClientConn) validateTransportCredentials() error {
+ if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+ return errNoTransportSecurity
+ }
+ if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+ return errTransportCredsAndBundle
+ }
+ if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+ return errNoTransportCredsInBundle
+ }
+ transportCreds := cc.dopts.copts.TransportCredentials
+ if transportCreds == nil {
+ transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+ }
+ if transportCreds.Info().SecurityProtocol == "insecure" {
+ for _, cd := range cc.dopts.copts.PerRPCCredentials {
+ if cd.RequireTransportSecurity() {
+ return errTransportCredentialsMissing
+ }
+ }
+ }
+ return nil
+}
+
+// channelzRegistration registers the newly created ClientConn with channelz and
+// stores the returned identifier in `cc.channelz`. A channelz trace event is
+// emitted for ClientConn creation. If the newly created ClientConn is a nested
+// one, i.e., a valid parent ClientConn ID is specified via a dial option, the
+// trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+ parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
+ cc.channelz = channelz.RegisterChannel(parentChannel, target)
+ cc.addTraceEvent("created")
+}
+
+// chainUnaryClientInterceptors chains all unary client interceptors into one.
+func chainUnaryClientInterceptors(cc *ClientConn) {
+ interceptors := cc.dopts.chainUnaryInts
+ // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+ // be executed before any other chained interceptors.
+ if cc.dopts.unaryInt != nil {
+ interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
+ }
+ var chainedInt UnaryClientInterceptor
+ if len(interceptors) == 0 {
+ chainedInt = nil
+ } else if len(interceptors) == 1 {
+ chainedInt = interceptors[0]
+ } else {
+ chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+ return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
+ }
+ }
+ cc.dopts.unaryInt = chainedInt
+}
+
+// getChainUnaryInvoker recursively generates the chained unary invoker.
+func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker {
+ if curr == len(interceptors)-1 {
+ return finalInvoker
+ }
+ return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
+ return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
+ }
+}
+
+// chainStreamClientInterceptors chains all stream client interceptors into one.
+func chainStreamClientInterceptors(cc *ClientConn) {
+ interceptors := cc.dopts.chainStreamInts
+ // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will
+ // be executed before any other chained interceptors.
+ if cc.dopts.streamInt != nil {
+ interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...)
+ }
+ var chainedInt StreamClientInterceptor
+ if len(interceptors) == 0 {
+ chainedInt = nil
+ } else if len(interceptors) == 1 {
+ chainedInt = interceptors[0]
+ } else {
+ chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) {
+ return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...)
+ }
+ }
+ cc.dopts.streamInt = chainedInt
+}
+
+// getChainStreamer recursively generates the chained client stream constructor.
+func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer {
+ if curr == len(interceptors)-1 {
+ return finalStreamer
+ }
+ return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+ return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...)
+ }
+}
+
+// newConnectivityStateManager creates a connectivityStateManager with
+// the specified channel.
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
+ return &connectivityStateManager{
+ channelz: channel,
+ pubSub: grpcsync.NewPubSub(ctx),
+ }
+}
+
+// connectivityStateManager keeps the connectivity.State of ClientConn.
+// This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state at two places.
+type connectivityStateManager struct {
+ mu sync.Mutex
+ state connectivity.State
+ notifyChan chan struct{}
+ channelz *channelz.Channel
+ pubSub *grpcsync.PubSub
+}
+
+// updateState updates the connectivity.State of ClientConn.
+// If there's a change, it notifies goroutines waiting on the state change to
+// happen.
+func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + csm.channelz.ChannelMetrics.State.Store(&state) + csm.pubSub.Publish(state) + + channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + pickerWrapper *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + retryThrottler atomic.Value // Updated from service config. + + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. + sc *ServiceConfig // Latest service config received from the resolver. 
+ conns map[*addrConn]struct{} // Set to nil on close.
+ mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+ // firstResolveEvent is used to track whether the name resolver sent us at
+ // least one update. RPCs block on this event. May be accessed without mu
+ // if we know we cannot be asked to enter idle mode while accessing it (e.g.
+ // when the idle manager has already been closed, or if we are already
+ // entering idle mode).
+ firstResolveEvent *grpcsync.Event
+
+ lceMu sync.Mutex // protects lastConnectionError
+ lastConnectionError error
+}
+
+// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
+// ctx expires. A true value is returned in the former case and false in the latter.
+func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
+ ch := cc.csMgr.getNotifyChan()
+ if cc.csMgr.getState() != sourceState {
+ return true
+ }
+ select {
+ case <-ctx.Done():
+ return false
+ case <-ch:
+ return true
+ }
+}
+
+// GetState returns the connectivity.State of ClientConn.
+func (cc *ClientConn) GetState() connectivity.State {
+ return cc.csMgr.getState()
+}
+
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle. Does not wait for the connection attempts to begin
+// before returning.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ cc.addTraceEvent(err.Error())
+ return
+ }
+ // If the ClientConn was not in idle mode, we need to call ExitIdle on the
+ // LB policy so that connections can be created.
+ cc.mu.Lock()
+ cc.balancerWrapper.exitIdle()
+ cc.mu.Unlock()
+}
+
+// waitForResolvedAddrs blocks until the resolver has provided addresses or the
+// context expires. Returns nil unless the context expires first; otherwise
+// returns a status error based on the context.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+ // This is on the RPC path, so we use a fast path to avoid the
+ // more-expensive "select" below after the resolver has returned once.
+ if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}", defaultMaxCallAttempts) + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig() { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}) + } +} + +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig() + + cc.balancerWrapper.resolverError(err) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig() + } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig() + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector) + } else { + ret = balancer.ErrBadResolverState + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. 
+ cc.applyFailingLBLocked(s.ServiceConfig)
+ cc.mu.Unlock()
+ return ret
+ }
+ }
+ }
+
+ var balCfg serviceconfig.LoadBalancingConfig
+ if cc.sc != nil && cc.sc.lbConfig != nil {
+ balCfg = cc.sc.lbConfig
+ }
+ bw := cc.balancerWrapper
+ cc.mu.Unlock()
+
+ uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
+ if ret == nil {
+ ret = uccsErr // prefer ErrBadResolverState since any other error is
+ // currently meaningless to the caller.
+ }
+ return ret
+}
+
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
+ var err error
+ if sc.Err != nil {
+ err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
+ } else {
+ err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
+ }
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+ cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
+ cc.csMgr.updateState(connectivity.TransientFailure)
+}
+
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
+ out := make([]resolver.Address, len(in))
+ copy(out, in)
+ return out
+}
+
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
+//
+// Caller needs to make sure len(addrs) > 0.
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+ if cc.conns == nil {
+ return nil, ErrClientConnClosing
+ }
+
+ ac := &addrConn{
+ state: connectivity.Idle,
+ cc: cc,
+ addrs: copyAddresses(addrs),
+ scopts: opts,
+ dopts: cc.dopts,
+ channelz: channelz.RegisterSubChannel(cc.channelz, ""),
+ resetBackoff: make(chan struct{}),
+ stateReadyChan: make(chan struct{}),
+ }
+ ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ // Start with our address set to the first address; this may be updated if
+ // we connect to different addresses.
+ ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
+
+ channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+ Desc: "Subchannel created",
+ Severity: channelz.CtInfo,
+ Parent: &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
+ Severity: channelz.CtInfo,
+ },
+ })
+
+ // Track ac in cc. This needs to be done before any getTransport(...) is called.
+ cc.conns[ac] = struct{}{}
+ return ac, nil
+}
+
+// removeAddrConn removes the addrConn in the subConn from the clientConn.
+// It also tears down the ac with the given error.
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+ delete(cc.conns, ac)
+ cc.mu.Unlock()
+ ac.tearDown(err)
+}
+
+// Target returns the target string of the ClientConn.
+func (cc *ClientConn) Target() string {
+ return cc.target
+}
+
+// CanonicalTarget returns the canonical target string of the ClientConn.
+func (cc *ClientConn) CanonicalTarget() string {
+ return cc.parsedTarget.String()
+}
+
+func (cc *ClientConn) incrCallsStarted() {
+ cc.channelz.ChannelMetrics.CallsStarted.Add(1)
+ cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
+}
+
+func (cc *ClientConn) incrCallsSucceeded() {
+ cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
+}
+
+func (cc *ClientConn) incrCallsFailed() {
+ cc.channelz.ChannelMetrics.CallsFailed.Add(1)
+}
+
+// connect starts creating a transport.
+// It does nothing if the ac is not IDLE.
+// TODO(bar) Move this to the addrConn section.
+func (ac *addrConn) connect() error {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ if logger.V(2) {
+ logger.Infof("connect called on shutdown addrConn; ignoring.")
+ }
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ if ac.state != connectivity.Idle {
+ if logger.V(2) {
+ logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
+ }
+ ac.mu.Unlock()
+ return nil
+ }
+
+ ac.resetTransportAndUnlock()
+ return nil
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
+}
+
+// updateAddrs updates ac.addrs with the new addresses list and handles active
+// connections or connection attempts.
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
+ addrs = copyAddresses(addrs)
+ limit := len(addrs)
+ if limit > 5 {
+ limit = 5
+ }
+ channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
+
+ ac.mu.Lock()
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
+ ac.mu.Unlock()
+ return
+ }
+
+ ac.addrs = addrs
+
+ if ac.state == connectivity.Shutdown ||
+ ac.state == connectivity.TransientFailure ||
+ ac.state == connectivity.Idle {
+ // We were not connecting, so do nothing but update the addresses.
+ ac.mu.Unlock()
+ return
+ }
+
+ if ac.state == connectivity.Ready {
+ // Try to find the connected address.
+ for _, a := range addrs {
+ a.ServerName = ac.cc.getServerName(a)
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
+ // We are connected to a valid address, so do nothing but
+ // update the addresses.
+ ac.mu.Unlock()
+ return
+ }
+ }
+ }
+
+ // We are either connected to the wrong address or currently connecting.
+ // Stop the current iteration and restart.
+
+ ac.cancel()
+ ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
+
+ // We have to defer here because GracefulClose => onClose, which requires
+ // locking ac.mu.
+ if ac.transport != nil {
+ defer ac.transport.GracefulClose()
+ ac.transport = nil
+ }
+
+ if len(addrs) == 0 {
+ ac.updateConnectivityState(connectivity.Idle, nil)
+ }
+
+ // Since we were connecting/connected, we should start a new connection
+ // attempt.
+ go ac.resetTransportAndUnlock()
+}
+
+// getServerName determines the serverName to be used in the connection
+// handshake.
The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + return getMethodConfig(cc.sc, method) +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + cc.resolverWrapper.resolveNow(o) + cc.mu.RUnlock() +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. 
+// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. + cc.mu.Unlock() + + cc.resolverWrapper.close() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelz.ID) + + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw *acBalancerWrapper + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + // This mutex is used on the RPC path, so its usage should be minimized as + // much as possible. + // TODO: Find a lock-free way to retrieve the transport and state from the + // addrConn. + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. + + backoffIdx int // Needs to be stateful for resetConnectBackoff. + resetBackoff chan struct{} + + channelz *channelz.SubChannel +} + +// Note: this requires a lock on ac.mu. 
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
+ if ac.state == s {
+ return
+ }
+ ac.state = s
+ ac.channelz.ChannelMetrics.State.Store(&s)
+ if lastErr == nil {
+ channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
+ } else {
+ channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
+ }
+ ac.acbw.updateState(s, ac.curAddr, lastErr)
+}
+
+// adjustParams updates parameters used to create transports upon
+// receiving a GoAway.
+func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
+ switch r {
+ case transport.GoAwayTooManyPings:
+ v := 2 * ac.dopts.copts.KeepaliveParams.Time
+ ac.cc.mu.Lock()
+ if v > ac.cc.mkp.Time {
+ ac.cc.mkp.Time = v
+ }
+ ac.cc.mu.Unlock()
+ }
+}
+
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
+ acCtx := ac.ctx
+ if acCtx.Err() != nil {
+ ac.mu.Unlock()
+ return
+ }
+
+ addrs := ac.addrs
+ backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+ // This will be the duration that dial gets to finish.
+ dialDuration := minConnectTimeout
+ if ac.dopts.minConnectTimeout != nil {
+ dialDuration = ac.dopts.minConnectTimeout()
+ }
+
+ if dialDuration < backoffFor {
+ // Give dial more time as we keep failing to connect.
+ dialDuration = backoffFor
+ }
+ // We can potentially spend all the time trying the first address, and
+ // if the server accepts the connection and then hangs, the following
+ // addresses will never be tried.
+ //
+ // The spec doesn't mention what should be done for multiple addresses.
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+ connectDeadline := time.Now().Add(dialDuration)
+
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ ac.mu.Unlock()
+
+ if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+ ac.cc.resolveNow(resolver.ResolveNowOptions{})
+ ac.mu.Lock()
+ if acCtx.Err() != nil {
+ // addrConn was torn down.
+ ac.mu.Unlock()
+ return
+ }
+ // After exhausting all addresses, the addrConn enters
+ // TRANSIENT_FAILURE.
+ ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+ // Backoff.
+ b := ac.resetBackoff
+ ac.mu.Unlock()
+
+ timer := time.NewTimer(backoffFor)
+ select {
+ case <-timer.C:
+ ac.mu.Lock()
+ ac.backoffIdx++
+ ac.mu.Unlock()
+ case <-b:
+ timer.Stop()
+ case <-acCtx.Done():
+ timer.Stop()
+ return
+ }
+
+ ac.mu.Lock()
+ if acCtx.Err() == nil {
+ ac.updateConnectivityState(connectivity.Idle, err)
+ }
+ ac.mu.Unlock()
+ return
+ }
+ // Success; reset backoff.
+ ac.mu.Lock()
+ ac.backoffIdx = 0
+ ac.mu.Unlock()
+}
+
+// tryAllAddrs tries to create a connection to the addresses, and stops at
+// the first successful one. It returns an error if no address was successfully
+// connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { + var firstConnErr error + for _, addr := range addrs { + ac.channelz.ChannelMetrics.Target.Store(&addr.Addr) + if ctx.Err() != nil { + return errConnClosing + } + ac.mu.Lock() + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr) + + err := ac.createTransport(ctx, addr, copts, connectDeadline) + if err == nil { + return nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.updateConnectionError(err) + } + + // Couldn't connect to any address. + return firstConnErr +} + +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) + + onClose := func(r transport.GoAwayReason) { + ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason + ac.adjustParams(r) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + } + + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) + defer cancel() + copts.ChannelzParent = ac.channelz + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } + // newTr is either nil, or closed. + hcancel() + channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err + } + + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. 
+ go newTr.Close(transport.ErrConnClosing) + return nil + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil + } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (any, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport + } + return nil +} + +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. 
If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateReadyChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelz.ID) + ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancellation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. 
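retryThrottler above implements the client side of retry throttling: per the gRPC retry design (gRFC A6), the service config's retryThrottling block seeds the bucket, with max taken from maxTokens, ratio from tokenRatio, and the throttling threshold at half of maxTokens. A hedged sketch of enabling it; the target and the numbers are placeholders:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Failed RPCs drain one token each; successes refill tokenRatio per call.
	// Retries are throttled once the bucket drops to maxTokens/2.
	conn, err := grpc.NewClient("dns:///greeter.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{
		  "retryThrottling": {
		    "maxTokens": 10,
		    "tokenRatio": 0.1
		  }
		}`))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}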
+func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +func (ac *addrConn) incrCallsStarted() { + ac.channelz.ChannelMetrics.CallsStarted.Add(1) + ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + ac.channelz.ChannelMetrics.CallsSucceeded.Add(1) +} + +func (ac *addrConn) incrCallsFailed() { + ac.channelz.ChannelMetrics.CallsFailed.Add(1) +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} + +// initParsedTargetAndResolverBuilder parses the user's dial target and stores +// the parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) initParsedTargetAndResolverBuilder() error { + logger.Infof("original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err == nil { + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. For either case, we need to respect any overridden + // global defaults set by the user. + defScheme := cc.dopts.defaultScheme + if internal.UserSetDefaultScheme { + defScheme = resolver.GetDefaultScheme() + } + + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + return err + } + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. 
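parseTarget, documented above, is essentially url.Parse, and initParsedTargetAndResolverBuilder re-parses any target whose scheme has no registered resolver as defaultScheme:///target. A small standalone illustration; the targets are examples only:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, target := range []string{
		"dns:///example.com:443", // scheme "dns" -> dns resolver
		"unix:///tmp/app.sock",   // scheme "unix" -> unix resolver
		"localhost:50051",        // parses with scheme "localhost" (unregistered),
		// so the client falls back to "dns:///localhost:50051"
	} {
		u, err := url.Parse(target)
		if err != nil {
			fmt.Println(target, "->", err)
			continue
		}
		fmt.Printf("%-26s scheme=%q path=%q opaque=%q\n",
			target, u.Scheme, u.Path, u.Opaque)
	}
}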
+func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + + return resolver.Target{URL: *u}, nil +} + +// encodeAuthority escapes the authority string based on valid chars defined in +// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2. +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimiters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) initAuthority() error { + dopts := cc.dopts + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. 
+ // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + endpoint := cc.parsedTarget.Endpoint() + if authorityFromDialOption != "" { + cc.authority = authorityFromDialOption + } else if authorityFromCreds != "" { + cc.authority = authorityFromCreds + } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok { + cc.authority = auth.OverrideAuthority(cc.parsedTarget) + } else if strings.HasPrefix(endpoint, ":") { + cc.authority = "localhost" + endpoint + } else { + cc.authority = encodeAuthority(endpoint) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 00000000..e840858b --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,105 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" +) + +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the registry in +// the encoding package and is not part of the core functionality. +type baseCodec interface { + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). Initially checks the V1 registry with encoding.GetCodec and, if +// a codec is registered there, wraps it with newCodecV1Bridge to turn it into +// an encoding.CodecV2. Otherwise, it returns the codec from the V2 registry via +// encoding.GetCodecV2. Returns nil if no codec is registered under the name in +// either registry.
+func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) +} + +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v any) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 00000000..665e790b --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/protobuf/proto" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. 
If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials require + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo, and may include additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL).
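As a concrete example of the PerRPCCredentials contract above, a minimal static bearer-token credential; tokenAuth is an illustrative name and not part of the package:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
)

// tokenAuth attaches a static bearer token to every RPC.
type tokenAuth struct{ token string }

// GetRequestMetadata is called by the transport on each request; the map
// entries are sent as request headers.
func (t tokenAuth) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + t.token}, nil
}

// RequireTransportSecurity refuses to send the token over plaintext.
func (t tokenAuth) RequireTransportSecurity() bool { return true }

// Compile-time check that the interface above is satisfied.
var _ credentials.PerRPCCredentials = tokenAuth{}

func main() {
	md, _ := tokenAuth{token: "s3cr3t"}.GetRequestMetadata(context.Background())
	fmt.Println(md) // map[authorization:Bearer s3cr3t]
}

On a client, such a value would typically be passed via grpc.WithPerRPCCredentials, which appends it to the connection's credential list as described later in dialoptions.go.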
+type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). + TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. + PerRPCCredentials() PerRPCCredentials + + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. 
+// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes +} + +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. +func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo) + return chi +} + +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() CommonAuthInfo + } + if ai == nil { + return errors.New("AuthInfo is nil") + } + if ci, ok := ai.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +// +// This API is experimental. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +// +// This API is experimental. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. +// +// This API is experimental. 
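A hedged sketch of CheckSecurityLevel in use on the server side, pulling AuthInfo out of the peer; requirePrivacy is an illustrative helper, not a library function:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
	"google.golang.org/grpc/status"
)

// requirePrivacy rejects calls whose connection does not provide both
// privacy and integrity (i.e. anything weaker than TLS-grade security).
func requirePrivacy(ctx context.Context) error {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return status.Error(codes.Internal, "no peer information in context")
	}
	// Backward-compatible by design: this passes when AuthInfo does not
	// expose a security level at all (see the doc comment above).
	return credentials.CheckSecurityLevel(p.AuthInfo, credentials.PrivacyAndIntegrity)
}

func main() {
	fmt.Println(requirePrivacy(context.Background())) // no peer -> Internal error
}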
+type OtherChannelzSecurityValue struct { + ChannelzSecurityValue + Name string + Value proto.Message +} diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go new file mode 100644 index 00000000..4c805c64 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package insecure provides an implementation of the +// credentials.TransportCredentials interface which disables transport security. +package insecure + +import ( + "context" + "net" + + "google.golang.org/grpc/credentials" +) + +// NewCredentials returns a credentials which disables transport security. +// +// Note that using this credentials with per-RPC credentials which require +// transport security is incompatible and will cause grpc.Dial() to fail. +func NewCredentials() credentials.TransportCredentials { + return insecureTC{} +} + +// insecureTC implements the insecure transport credentials. The handshake +// methods simply return the passed in net.Conn and set the security level to +// NoSecurity. +type insecureTC struct{} + +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) { + return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil +} + +func (insecureTC) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{SecurityProtocol: "insecure"} +} + +func (insecureTC) Clone() credentials.TransportCredentials { + return insecureTC{} +} + +func (insecureTC) OverrideServerName(string) error { + return nil +} + +// info contains the auth information for an insecure connection. +// It implements the AuthInfo interface. +type info struct { + credentials.CommonAuthInfo +} + +// AuthType returns the type of info as a string. +func (info) AuthType() string { + return "insecure" +} + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per-RPC credentials. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns a nil implementation, as the insecure +// bundle does not support per-RPC credentials. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential.
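The typical use of this package, sketched for a local plaintext endpoint; the address is a placeholder:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Transport security must be configured explicitly; insecure.NewCredentials()
	// is the deliberate opt-out for local or test traffic.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}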
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 00000000..41143585 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,283 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/url" + "os" + + "google.golang.org/grpc/grpclog" + credinternal "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/envconfig" +) + +var logger = grpclog.Component("credentials") + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup(t.State.CipherSuite), + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
+ serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + + // The negotiated protocol can be either of the following: + // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since + // it is the only protocol advertised by the client during the handshake. + // The tls library ensures that the server chooses a protocol advertised + // by the client. + // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement + // for using HTTP/2 over TLS. We can terminate the connection immediately. + np := conn.ConnectionState().NegotiatedProtocol + if np == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } + logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + cs := conn.ConnectionState() + // The negotiated application protocol can be empty only if the client doesn't + // support ALPN. In such cases, we can close the connection since ALPN is required + // for using HTTP/2 over TLS. + if cs.NegotiatedProtocol == "" { + if envconfig.EnforceALPNEnabled { + conn.Close() + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + } else if logger.V(2) { + logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") + } + } + tlsInfo := TLSInfo{ + State: cs, + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. 
+func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
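A sketch tying the constructors above together on the client side; the CA bundle path and server name are placeholders. For mTLS one would instead build a tls.Config with client Certificates and pass it to NewTLS:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// One-way TLS: trust the CA bundle in ca.pem and verify the server's
	// hostname taken from the dial target (no override).
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("load CA bundle: %v", err)
	}
	conn, err := grpc.NewClient("dns:///greeter.example.com:443",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}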
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 00000000..2b285bee --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,767 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "net/url" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +const ( + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges + defaultMaxCallAttempts = 5 +) + +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.AddGlobalPerTargetDialOptions = func(opt any) { + if ptdo, ok := opt.(perTargetDialOption); ok { + globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo) + } + } + internal.ClearGlobalPerTargetDialOptions = func() { + globalPerTargetDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithBufferPool = withBufferPool +} + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParent channelz.Identifier + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder + idleTimeout time.Duration + defaultScheme string + maxCallAttempts int +} + +// DialOption configures how we set up the connection. 
+type DialOption interface { + apply(*dialOptions) +} + +var globalDialOptions []DialOption + +// perTargetDialOption takes a parsed target and returns a dial option to apply. +// +// This gets called after NewClient() parses the target, and allows per target +// configuration set through a returned DialOption. The DialOption will not take +// effect if it specifies a resolver builder, as that DialOption is factored in +// while parsing the target. +type perTargetDialOption interface { + // DialOptionForTarget returns a DialOption to apply. + DialOptionForTarget(parsedTarget url.URL) DialOption +} + +var globalPerTargetDialOptions []perTargetDialOption + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true, every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The default value for this buffer is 32KB. +// +// Zero or negative values will disable the write buffer such that each write +// will be on the underlying connection. Note: A Send call may not directly +// translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of the read buffer; this determines +// how much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero or negative values will +// disable the read buffer for a connection so the data framer can access the +// underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored.
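The embedding pattern that the EmptyDialOption comment above describes, sketched for a hypothetical wrapper library; tracingOption and withTracing are illustrative names:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// tracingOption satisfies grpc.DialOption via the embedded EmptyDialOption
// (whose apply is a no-op); a wrapper library would fish it back out of the
// option list before handing the remaining options to grpc.NewClient.
type tracingOption struct {
	grpc.EmptyDialOption
	sampleRate float64
}

func withTracing(rate float64) grpc.DialOption {
	return tracingOption{sampleRate: rate}
}

func main() {
	opts := []grpc.DialOption{withTracing(0.25)}
	for _, o := range opts {
		if t, ok := o.(tracingOption); ok {
			fmt.Println("tracing sample rate:", t.sampleRate)
		}
	}
}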
+func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will +// be supported throughout 1.x. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be +// supported throughout 1.x. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. 
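A sketch of WithConnectParams, the supported replacement for the deprecated backoff options that follow; the values start from the documented defaults and only tighten the cap, and the target is a placeholder:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///greeter.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  1 * time.Second,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   30 * time.Second, // cap reconnect waits at 30s
			},
			// Give each dial attempt at least this long, as described in
			// resetTransportAndUnlock earlier in this patch.
			MinConnectTimeout: 20 * time.Second,
		}))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
}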
+func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes callers of Dial block until the +// underlying connection is up. Without this, Dial returns immediately and +// connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// Will be supported throughout 1.x. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = insecure.NewCredentials() + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. 
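Since WithBlock and its companions above are deprecated precisely because NewClient connects lazily, a sketch of the non-blocking pattern; the address is a placeholder:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// NewClient returns without dialing; the first RPC (or an explicit
	// Connect) triggers connection establishment under the backoff rules
	// described earlier.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("create client: %v", err)
	}
	defer conn.Close()
	conn.Connect() // optional: start connecting eagerly instead of on first RPC
}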
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.TransportCredentials = creds
+	})
+}
+
+// WithPerRPCCredentials returns a DialOption which sets credentials and places
+// auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+	})
+}
+
+// WithCredentialsBundle returns a DialOption to set a credentials bundle for
+// the ClientConn. This should not be used together with
+// WithTransportCredentials.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithCredentialsBundle(b credentials.Bundle) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.CredsBundle = b
+	})
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a
+// ClientConn initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
+func WithTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.timeout = d
+	})
+}
+
+// WithContextDialer returns a DialOption that sets a dialer to create
+// connections. If FailOnNonTempDialError() is set to true, and an error is
+// returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+// option to true from the Control field. For a concrete example of how to do
+// this, see internal.NetDialerWithTCPKeepalive().
+//
+// For more information, please see [issue 23459] in the Go github repo.
+//
+// [issue 23459]: https://github.com/golang/go/issues/23459
+func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.Dialer = f
+	})
+}
+
+func init() {
+	internal.WithHealthCheckFunc = withHealthCheckFunc
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing
+// network addresses. If FailOnNonTempDialError() is set to true, and an error
+// is returned by f, gRPC checks the error's Temporary() method to decide if it
+// should try to reconnect to the network address.
+//
+// Deprecated: use WithContextDialer instead. Will be supported throughout
+// 1.x.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+	return WithContextDialer(
+		func(ctx context.Context, addr string) (net.Conn, error) {
+			if deadline, ok := ctx.Deadline(); ok {
+				return f(addr, time.Until(deadline))
+			}
+			return f(addr, 0)
+		})
+}
+
+// WithStatsHandler returns a DialOption that specifies the stats handler for
+// all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+ return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// Deprecated: this DialOption is not supported by NewClient. +// This API may be changed or removed in a +// later release. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + " " + grpcUA + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +// +// Keepalive is disabled by default. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. 
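+//
+// Illustrative sketch (editor's addition, not part of the upstream file): in
+//
+//	grpc.WithChainStreamInterceptor(a, b, c)
+//
+// a is the outermost interceptor, c is the innermost wrapper around the real
+// call, and any interceptor set via WithStreamInterceptor runs before a.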
+func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header and as the server name in authentication handshake. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(c channelz.Identifier) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParent = c + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used, or +// +// 2. The name resolver does not provide a service config or provides an +// invalid service config. +// +// The parameter s is the JSON representation of the default service config. +// For more information about service configs, see: +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +// For a simple example of usage, see: +// examples/features/load_balancing/client/main.go +func WithDefaultServiceConfig(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultServiceConfigRawJSON = &s + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +type MaxHeaderListSizeDialOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) { + do.copts.MaxHeaderListSize = &o.MaxHeaderListSize +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return MaxHeaderListSizeDialOption{ + MaxHeaderListSize: s, + } +} + +// WithDisableHealthCheck disables the LB channel health checking for all +// SubConns of this ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func WithDisableHealthCheck() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableHealthCheck = true
+	})
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes it easier for tests to change the health check
+// function.
+//
+// For testing purposes only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		copts: transport.ConnectOptions{
+			ReadBufferSize:  defaultReadBufSize,
+			WriteBufferSize: defaultWriteBufSize,
+			UseProxy:        true,
+			UserAgent:       grpcUA,
+			BufferPool:      mem.DefaultBufferPool(),
+		},
+		bs:              internalbackoff.DefaultExponential,
+		healthCheckFunc: internal.HealthCheckFunc,
+		idleTimeout:     30 * time.Minute,
+		defaultScheme:   "dns",
+		maxCallAttempts: defaultMaxCallAttempts,
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purposes only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
+
+// withDefaultScheme is used to allow Dial to use "passthrough" as the default
+// name resolver, while NewClient uses "dns" otherwise.
+func withDefaultScheme(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultScheme = s
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register. They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e. there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode,
+// and as a result the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time, and idleness can be disabled by passing a timeout of zero.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.idleTimeout = d
+	})
+}
+
+// WithMaxCallAttempts returns a DialOption that configures the maximum number
+// of attempts per call (including retries and hedging) using the channel.
+// Service owners may specify a higher value for these parameters, but higher
+// values will be treated as equal to the maximum value by the client
+// implementation. This mitigates security concerns related to the service
+// config being transferred to the client via DNS.
+//
+// A value of 5 will be used if this dial option is not set or n < 2.
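+//
+// Illustrative sketch (editor's addition, not part of the upstream file):
+//
+//	conn, err := grpc.NewClient(target, grpc.WithMaxCallAttempts(3))
+//
+// caps every RPC on the channel at three total attempts, even if the service
+// config asks for a larger retry budget.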
+func WithMaxCallAttempts(n int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if n < 2 { + n = defaultMaxCallAttempts + } + o.maxCallAttempts = n + }) +} + +func withBufferPool(bufferPool mem.BufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.BufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 00000000..e7b532b6 --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate ./scripts/regenerate.sh + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 00000000..11d0ae14 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,131 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package encoding + +import ( + "io" + "strings" + + "google.golang.org/grpc/internal/grpcutil" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. 
+ Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]any) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
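+//
+// Illustrative sketch (editor's addition, not part of the upstream file;
+// myCodec is a hypothetical Codec implementation):
+//
+//	func init() { encoding.RegisterCodec(myCodec{}) } // Name() returns "mycodec"
+//
+// after which encoding.GetCodec("mycodec") retrieves it.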
+func GetCodec(contentSubtype string) Codec { + c, _ := registeredCodecs[contentSubtype].(Codec) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 00000000..074c5e23 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. 
+func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go new file mode 100644 index 00000000..6306e8bb --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gzip implements and registers the gzip compressor +// during the initialization. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package gzip + +import ( + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "sync" + + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the gzip compressor. +const Name = "gzip" + +func init() { + c := &compressor{} + c.poolCompressor.New = func() any { + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} + } + encoding.RegisterCompressor(c) +} + +type writer struct { + *gzip.Writer + pool *sync.Pool +} + +// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported). +// NOTE: this function must only be called during initialization time (i.e. in an init() function), +// and is not thread-safe. +// +// The error returned will be nil if the specified level is valid. +func SetLevel(level int) error { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return fmt.Errorf("grpc: invalid gzip compression level: %d", level) + } + c := encoding.GetCompressor(Name).(*compressor) + c.poolCompressor.New = func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } + return &writer{Writer: w, pool: &c.poolCompressor} + } + return nil +} + +func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) { + z := c.poolCompressor.Get().(*writer) + z.Writer.Reset(w) + return z, nil +} + +func (z *writer) Close() error { + defer z.pool.Put(z) + return z.Writer.Close() +} + +type reader struct { + *gzip.Reader + pool *sync.Pool +} + +func (c *compressor) Decompress(r io.Reader) (io.Reader, error) { + z, inPool := c.poolDecompressor.Get().(*reader) + if !inPool { + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil + } + if err := z.Reset(r); err != nil { + c.poolDecompressor.Put(z) + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (n int, err error) { + n, err = z.Reader.Read(p) + if err == io.EOF { + z.pool.Put(z) + } + return n, err +} + +// RFC1952 specifies that the last four bytes "contains the size of +// the original (uncompressed) input data modulo 2^32." +// gRPC has a max message size of 2GB so we don't need to worry about wraparound. 
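+// (Editor's note, not part of the upstream file: the four bytes read below are
+// the gzip ISIZE trailer field, which RFC 1952 stores least-significant byte
+// first, hence the little-endian decode.)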
+func (c *compressor) DecompressedSize(buf []byte) int { + last := len(buf) + if last < 4 { + return -1 + } + return int(binary.LittleEndian.Uint32(buf[last-4 : last])) +} + +func (c *compressor) Name() string { + return Name +} + +type compressor struct { + poolCompressor sync.Pool + poolDecompressor sync.Pool +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 00000000..ceec319d --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "fmt" + + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodecV2(&codecV2{}) +} + +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} + +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { + vv := messageV2Of(v) + if vv == nil { + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) + } + + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil +} + +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { + vv := messageV2Of(v) + if vv == nil { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) +} + +func messageV2Of(v any) proto.Message { + switch v := v.(type) { + case protoadapt.MessageV1: + return protoadapt.MessageV2Of(v) + case protoadapt.MessageV2: + return v + } + + return nil +} + +func (c *codecV2) Name() string { + return Name +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 00000000..1d827dd5 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"maps"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through the global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+	// The name of this metric. This name must be unique across the whole binary
+	// (including any per-call metrics). See
+	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+	// for metric naming conventions.
+	Name Metric
+	// The description of this metric.
+	Description string
+	// The unit (e.g. entries, seconds) of this metric.
+	Unit string
+	// The required label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler.
+	Labels []string
+	// The optional label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler if configured.
+	OptionalLabels []string
+	// Whether this metric is on by default.
+	Default bool
+	// The type of metric. This is set by the metric registry, and not intended
+	// to be set by a component registering a metric.
+	Type MetricType
+	// Bounds are the bounds of this metric. This only applies to histogram
+	// metrics. If unset or set with length 0, stats handlers will fall back to
+	// default bounds.
+	Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+	MetricTypeIntCount MetricType = iota
+	MetricTypeFloatCount
+	MetricTypeIntHisto
+	MetricTypeFloatHisto
+	MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if the MetricDescriptor is not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+	return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+	if registeredMetrics[name] {
+		logger.Fatalf("metric %v already registered", name)
+	}
+	registeredMetrics[name] = true
+	if def {
+		DefaultMetrics = DefaultMetrics.Add(name)
+	}
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
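+//
+// Illustrative sketch (editor's addition, not part of the upstream file; the
+// metric name and label are hypothetical):
+//
+//	var pickCount = stats.RegisterInt64Count(stats.MetricDescriptor{
+//		Name:        "grpc.example.picks",
+//		Description: "Number of picks made by the example component.",
+//		Unit:        "pick",
+//		Labels:      []string{"grpc.target"},
+//		Default:     false,
+//	})
+//
+// Measurements then go through the handle, e.g.
+// pickCount.Record(recorder, 1, "my-target"), where recorder is any
+// MetricsRecorder (typically supplied by a stats handler).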
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
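+//
+// Illustrative sketch (editor's addition, not part of the upstream file): a
+// test takes the snapshot up front and restores the registry when it finishes,
+//
+//	cleanup := snapshotMetricsRegistryForTesting()
+//	defer cleanup()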
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	registeredMetrics = make(map[Metric]bool)
+	metricsRegistry = make(map[Metric]*MetricDescriptor)
+	// Seed the fresh maps with the current state; the saved originals are
+	// restored by the returned cleanup function.
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 00000000..3221f7a6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from the metric registry.
+type MetricsRecorder interface {
+	// RecordInt64Count records the measurement alongside labels on the int
+	// count associated with the provided handle.
+	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+	// RecordFloat64Count records the measurement alongside labels on the float
+	// count associated with the provided handle.
+	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+	// RecordInt64Histo records the measurement alongside labels on the int
+	// histo associated with the provided handle.
+	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+	// RecordFloat64Histo records the measurement alongside labels on the float
+	// histo associated with the provided handle.
+	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+	// RecordInt64Gauge records the measurement alongside labels on the int
+	// gauge associated with the provided handle.
+	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+	newMetrics := make(map[Metric]bool)
+	for _, metric := range metrics {
+		newMetrics[metric] = true
+	}
+	return &Metrics{
+		metrics: newMetrics,
+	}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+	return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
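+//
+// Illustrative sketch (editor's addition, not part of the upstream file):
+//
+//	base := stats.NewMetrics("grpc.a")   // set {grpc.a}
+//	more := base.Add("grpc.b")           // set {grpc.a, grpc.b}; base is unchanged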
+func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. +func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ce..f1ae080d 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9c..db320105 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. 
func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) 
+ } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 00000000..59c03bc1 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 00000000..e524fdd4 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. 
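+//
+// (Editor's note, not part of the upstream file: LoggerWrapper below adapts
+// this six-method Logger to the richer LoggerV2 surface by mapping the
+// Info/Warning/Error variants onto Print/Println/Printf.)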
+type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102..07df71e9 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. 
-func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. 
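+	// Marshal of a map[string]string cannot fail, so the error is ignored;
+	// the output is a single line such as {"message":"...","severity":"INFO"}.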
+ b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. + FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d82..4b203585 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) 
-} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d7..892dc13d 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. 
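For callers, nothing changes at the public surface: SetLogger and SetLoggerV2 keep their signatures and now simply store the logger in grpclog/internal. A minimal caller-side sketch (assumed typical usage, not part of this patch) that behaves identically before and after the refactor:

	package main

	import (
		"io"
		"os"

		"google.golang.org/grpc/grpclog"
	)

	func init() {
		// Discard INFO output; route WARNING, ERROR, and FATAL to stderr.
		grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr))
	}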
@@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. 
- b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000..d9233544 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,308 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.34.2 +// protoc v5.27.1 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 + HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method. +) + +// Enum value maps for HealthCheckResponse_ServingStatus. +var ( + HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", + 3: "SERVICE_UNKNOWN", + } + HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, + "SERVICE_UNKNOWN": 3, + } +) + +func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus { + p := new(HealthCheckResponse_ServingStatus) + *p = x + return p +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor() +} + +func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType { + return &file_grpc_health_v1_health_proto_enumTypes[0] +} + +func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead. +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0} +} + +type HealthCheckRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (x *HealthCheckRequest) Reset() { + *x = HealthCheckRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckRequest) ProtoMessage() {} + +func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. 
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthCheckRequest) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +type HealthCheckResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (x *HealthCheckResponse) Reset() { + *x = HealthCheckResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HealthCheckResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheckResponse) ProtoMessage() {} + +func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if x != nil { + return x.Status + } + return HealthCheckResponse_UNKNOWN +} + +var File_grpc_health_v1_health_proto protoreflect.FileDescriptor + +var file_grpc_health_v1_health_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, + 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, + 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, + 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, + 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, + 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, + 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_grpc_health_v1_health_proto_rawDescOnce sync.Once + file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc +) + +func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { + file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + }) + return file_grpc_health_v1_health_proto_rawDescData +} + +var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_goTypes = []any{ + (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus + (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest + (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse +} +var file_grpc_health_v1_health_proto_depIdxs = []int32{ + 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus + 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 3, // [3:5] is the sub-list for method output_type + 1, // [1:3] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_grpc_health_v1_health_proto_init() } +func file_grpc_health_v1_health_proto_init() { + if File_grpc_health_v1_health_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
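+		// Without the unsafe-based fast path, install exporters so the proto
+		// runtime can reach each message's unexported bookkeeping fields.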
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { + switch v := v.(*HealthCheckRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { + switch v := v.(*HealthCheckResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_grpc_health_v1_health_proto_goTypes, + DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs, + EnumInfos: file_grpc_health_v1_health_proto_enumTypes, + MessageInfos: file_grpc_health_v1_health_proto_msgTypes, + }.Build() + File_grpc_health_v1_health_proto = out.File + file_grpc_health_v1_health_proto_rawDesc = nil + file_grpc_health_v1_health_proto_goTypes = nil + file_grpc_health_v1_health_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 00000000..f96b8ab4 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,234 @@ +// Copyright 2015 The gRPC Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 +// source: grpc/health/v1/health.proto + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" +) + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. +type HealthClient interface { + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. 
If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] + +// HealthServer is the server API for Health service. +// All implementations should embed UnimplementedHealthServer +// for forward compatibility. +// +// Health is gRPC's mechanism for checking whether a server is able to handle +// RPCs. Its semantics are documented in +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md. +type HealthServer interface { + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. 
+ // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error +} + +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} + +func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} +func (UnimplementedHealthServer) testEmbeddedByValue() {} + +// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to HealthServer will +// result in compilation errors. +type UnsafeHealthServer interface { + mustEmbedUnimplementedHealthServer() +} + +func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&Health_ServiceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_Check_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] + +// Health_ServiceDesc is the grpc.ServiceDesc for Health service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Health_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000..877d78fc --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. +// Unary interceptors can be specified as a DialOption, using +// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a +// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC +// delegates all unary RPC invocations to the interceptor, and it is the +// responsibility of the interceptor to call invoker to complete the processing +// of the RPC. +// +// method is the RPC name. req and reply are the corresponding request and +// response messages. 
cc is the ClientConn on which the RPC was invoked. invoker +// is the handler to complete the RPC and it is the responsibility of the +// interceptor to call it. opts contain all applicable call options, including +// defaults from the ClientConn as well as per-call options. +// +// The returned error must be compatible with the status package. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of a ClientStream. Stream +// interceptors can be specified as a DialOption, using WithStreamInterceptor() +// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream +// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations +// to the interceptor, and it is the responsibility of the interceptor to call +// streamer. +// +// desc contains a description of the stream. cc is the ClientConn on which the +// RPC was invoked. streamer is the handler to create a ClientStream and it is +// the responsibility of the interceptor to call it. opts contain all applicable +// call options, including defaults from the ClientConn as well as per-call +// options. +// +// StreamClientInterceptor may return a custom ClientStream to intercept all I/O +// operations. The returned error must be compatible with the status package. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server any + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. +// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type UnaryHandler func(ctx context.Context, req any) (any, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. 
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 00000000..b15cf482
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,109 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"context"
+	"errors"
+	"math/rand"
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
+
+// Exponential implements exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return bc.Config.BaseDelay
+	}
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= bc.Config.Multiplier
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
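+// A nil return from f does not stop the loop; f is simply run again after the
+// next backoff interval.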
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go new file mode 100644 index 00000000..13821a92 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package gracefulswitch + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + childBuilder balancer.Builder + childConfig serviceconfig.LoadBalancingConfig +} + +func ChildName(l serviceconfig.LoadBalancingConfig) string { + return l.(*lbConfig).childBuilder.Name() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg []map[string]json.RawMessage + if err := json.Unmarshal(cfg, &lbCfg); err != nil { + return nil, err + } + for i, e := range lbCfg { + if len(e) != 1 { + return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i) + } + + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range e { + } + + builder := balancer.Get(name) + if builder == nil { + // Skip unregistered balancer names. + continue + } + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // This is a valid child with no config. 
+ return &lbConfig{childBuilder: builder}, nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) + } + return &lbConfig{childBuilder: builder, childConfig: cfg}, nil + } + + return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg)) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 00000000..73bb4c4e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,419 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. 
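+// Both call sites in UpdateState ensure balancerPending is non-nil before
+// invoking swap.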
+func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. +func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +// +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState +// to cause the Balancer to automatically change to the new child when necessary. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + _, err := gsb.switchTo(builder) + return err +} + +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return nil, errBalancerClosed + } + bw := &balancerWrapper{ + builder: builder, + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return nil, balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. + bw.Balancer = newBalancer + return bw, nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +// +// If the state's BalancerConfig is the config returned by a call to +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo +// the balancer indicated by the config before forwarding its config to it, if +// necessary. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. 
+ balToUpdate := gsb.latestBalancer() + gsbCfg, ok := state.BalancerConfig.(*lbConfig) + if ok { + // Switch to the child in the config unless it is already active. + if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() { + var err error + balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder) + if err != nil { + return fmt.Errorf("could not switch to new child balancer: %w", err) + } + } + // Unwrap the child balancer's config. + state.BalancerConfig = gsbCfg.childConfig + } + + if balToUpdate == nil { + return errBalancerClosed + } + + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + gsb.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + gsb.mu.Lock() + defer gsb.mu.Unlock() + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + gsb.mu.Unlock() + return + } + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) +} + +// Close closes any active child balancers. 
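Close, implemented next, tears down whichever children exist at that point. A sketch of the whole lifecycle as a hypothetical driver might exercise it (in real use the gRPC channel makes these calls, and this internal package can only be imported from within grpc-go itself):

	package sketch

	import (
		"encoding/json"

		"google.golang.org/grpc/balancer"
		_ "google.golang.org/grpc/balancer/roundrobin" // registers the round_robin builder

		"google.golang.org/grpc/internal/balancer/gracefulswitch"
	)

	func drive(cc balancer.ClientConn) error {
		gsb := gracefulswitch.NewBalancer(cc, balancer.BuildOptions{})
		defer gsb.Close() // closes both the current and any pending child

		// Parse a service-config style child list; UpdateClientConnState then
		// switches to round_robin automatically and forwards it the parsed config.
		cfg, err := gracefulswitch.ParseConfig(json.RawMessage(`[{"round_robin":{}}]`))
		if err != nil {
			return err
		}
		return gsb.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: cfg})
	}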
+func (gsb *Balancer) Close() {
+	gsb.mu.Lock()
+	gsb.closed = true
+	currentBalancerToClose := gsb.balancerCurrent
+	gsb.balancerCurrent = nil
+	pendingBalancerToClose := gsb.balancerPending
+	gsb.balancerPending = nil
+	gsb.mu.Unlock()
+
+	currentBalancerToClose.Close()
+	pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/shutdown
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+	balancer.Balancer
+	gsb     *Balancer
+	builder balancer.Builder
+
+	lastState balancer.State
+	subconns  map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held. Does not panic with a nil
+// receiver.
+func (bw *balancerWrapper) Close() {
+	// bw can be nil when there was no balancer to close (e.g. no pending
+	// balancer existed), so treat a nil receiver as a no-op.
+	if bw == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as Close() cannot be
+	// called concurrently with the write in SwitchTo(): the callsites of
+	// Close() for this balancer in the Graceful Switch Balancer never run
+	// until SwitchTo() returns.
+	bw.Balancer.Close()
+	bw.gsb.mu.Lock()
+	for sc := range bw.subconns {
+		sc.Shutdown()
+	}
+	bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	// Hold the mutex for this entire call to ensure it cannot occur
+	// concurrently with other updateState() calls. This causes updates to
+	// lastState and calls to cc.UpdateState to happen atomically.
+	bw.gsb.mu.Lock()
+	defer bw.gsb.mu.Unlock()
+	bw.lastState = state
+
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		return
+	}
+
+	if bw == bw.gsb.balancerCurrent {
+		// In the case that the current balancer exits READY, and there is a pending
+		// balancer, the pending balancer's cached State can be forwarded up to the
+		// ClientConn and the pending swapped into the current. This is because there
+		// is no reason to gracefully switch from and keep using the old policy as
+		// the ClientConn is not connected to any backends.
+		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+			bw.gsb.swap()
+			return
+		}
+		// Even if there is a pending balancer waiting to be gracefully switched to,
+		// continue to forward current balancer updates to the ClientConn. Ignoring
+		// state + picker from the current would cause undefined behavior/cause the
+		// system to behave incorrectly from the current LB policy's perspective.
+		// Also, the current LB is still being used by grpc to choose SubConns per
+		// RPC, and thus should use the most updated form of the current balancer.
+		bw.gsb.cc.UpdateState(state)
+		return
+	}
+	// This method is now dealing with a state update from the pending balancer.
+	// If the current balancer is currently in a state other than READY, the new
+	// policy can be swapped into place immediately. This is because there is no
+	// reason to gracefully switch from and keep using the old policy as the
+	// ClientConn is not connected to any backends.
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+		bw.gsb.swap()
+	}
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.gsb.mu.Unlock()
+
+	var sc balancer.SubConn
+	oldListener := opts.StateListener
+	opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
+	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+		sc.Shutdown()
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.subconns[sc] = true
+	bw.gsb.mu.Unlock()
+	return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+	// Ignore ResolveNow requests from anything other than the most recent
+	// balancer, because older balancers were already removed from the config.
+	if bw != bw.gsb.latestBalancer() {
+		return
+	}
+	bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// Note: existing third party balancers may call this, so it must remain
+	// until RemoveSubConn is fully removed.
+	sc.Shutdown()
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return
+	}
+	bw.gsb.mu.Unlock()
+	bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
+
+func (bw *balancerWrapper) Target() string {
+	return bw.gsb.cc.Target()
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 00000000..94a08d68
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) any
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Parse().
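+//
+// For illustration only, a Parser that surfaces a single (hypothetical)
+// trailer value might look like:
+//
+//	type strParser struct{}
+//
+//	func (strParser) Parse(md metadata.MD) any {
+//		if v := md.Get("x-server-load"); len(v) > 0 { // assumed trailer key
+//			return v[0]
+//		}
+//		return nil
+//	}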
+func Parse(md metadata.MD) any {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 00000000..755fdebc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,192 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+var grpclogLogger = grpclog.Component("binarylog")
+
+// Logger specifies MethodLoggers for method names with a Log call that
+// takes a context.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type Logger interface {
+	GetMethodLogger(methodName string) MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of these should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a MethodLogger for each individual method.
+var binLogger Logger
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+	binLogger = l
+}
+
+// GetLogger gets the binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+	return binLogger
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func GetMethodLogger(methodName string) MethodLogger {
+	if binLogger == nil {
+		return nil
+	}
+	return binLogger.GetMethodLogger(methodName)
+}
+
+func init() {
+	const envStr = "GRPC_BINARY_LOG_FILTER"
+	configStr := os.Getenv(envStr)
+	binLogger = NewLoggerFromConfigString(configStr)
+}
+
+// MethodLoggerConfig contains the setting for logging behavior of a method
+// logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
+	// Max length of header and message.
+	Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+	All      *MethodLoggerConfig
+	Services map[string]*MethodLoggerConfig
+	Methods  map[string]*MethodLoggerConfig
+
+	Blacklist map[string]struct{}
+}
+
+type logger struct {
+	config LoggerConfig
+}
+
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+	return &logger{config: config}
+}
+
+// newEmptyLogger creates an empty logger. The map fields need to be filled in
+// using the set* functions.
+func newEmptyLogger() *logger {
+	return &logger{}
+}
+
+// Set method logger for "*".
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+	if l.config.All != nil {
+		return fmt.Errorf("conflicting global rules found")
+	}
+	l.config.All = ml
+	return nil
+}
+
+// Set method logger for "service/*".
+//
+// Returns an error if a rule for the same service already exists.
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Services[service]; ok {
+		return fmt.Errorf("conflicting service rules for service %v found", service)
+	}
+	if l.config.Services == nil {
+		l.config.Services = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Services[service] = ml
+	return nil
+}
+
+// Set method logger for "service/method".
+//
+// Returns an error if a rule or a blacklist entry for the same method already
+// exists.
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Methods == nil {
+		l.config.Methods = make(map[string]*MethodLoggerConfig)
+	}
+	l.config.Methods[method] = ml
+	return nil
+}
+
+// Set blacklist method for "-service/method".
+func (l *logger) setBlacklist(method string) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+	}
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
+	}
+	if l.config.Blacklist == nil {
+		l.config.Blacklist = make(map[string]struct{})
+	}
+	l.config.Blacklist[method] = struct{}{}
+	return nil
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance. This is to
+// generate sequence id within the call.
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
+	s, m, err := grpcutil.ParseMethod(methodName)
+	if err != nil {
+		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		return nil
+	}
+	if ml, ok := l.config.Methods[s+"/"+m]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
+	}
+	if _, ok := l.config.Blacklist[s+"/"+m]; ok {
+		return nil
+	}
+	if ml, ok := l.config.Services[s]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
+	}
+	if l.config.All == nil {
+		return nil
+	}
+	return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 00000000..1ee00a39
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains variables/functions that are exported for testing only.
+//
+// Ideally these would live in a *_test.go file inside the binarylog package,
+// but that does not work with staticcheck under Go modules. The error was:
+// "MdToMetadataProto not declared by package binarylog". This could be caused
+// by the way staticcheck looks up files for a certain package, which doesn't
+// support *_test.go files.
+//
+// Move these to binary_test.go when staticcheck is fixed.
+
+package binarylog
+
+var (
+	// AllLogger is a logger that logs all headers/messages for all RPCs. It's
+	// for testing only.
+	AllLogger = NewLoggerFromConfigString("*")
+	// MdToMetadataProto converts metadata to a binary logging proto message.
+	// It's for testing only.
+	MdToMetadataProto = mdToMetadataProto
+	// AddrToProto converts an address to a binary logging proto message. It's
+	// for testing only.
+	AddrToProto = addrToProto
+)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 00000000..f9e80e27
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,208 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// NewLoggerFromConfigString reads the string and builds a logger. It can be
+// used to build a new logger to be installed via SetLogger.
+//
+// Example filter config strings:
+//   - "" Nothing will be logged
+//   - "*" All headers and messages will be fully logged.
+//   - "*{h}" Only headers will be logged.
+//   - "*{m:256}" Only the first 256 bytes of each message will be logged.
+//   - "Foo/*" Logs every method in service Foo
+//   - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+//   - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+//     /Foo/Bar, logs all headers and messages in every other method in service
+//     Foo.
+//
+// If two configs exist for the same method or service, the one specified
+// later overrides the earlier one.
+func NewLoggerFromConfigString(s string) Logger {
+	if s == "" {
+		return nil
+	}
+	l := newEmptyLogger()
+	methods := strings.Split(s, ",")
+	for _, method := range methods {
+		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
+			grpclogLogger.Warningf("failed to parse binary log config: %v", err)
+			return nil
+		}
+	}
+	return l
+}
+
+// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds
+// it to the right map in the logger.
+func (l *logger) fillMethodLoggerWithConfigString(config string) error {
+	// "" is invalid.
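+	// Three config shapes are handled below: "-service/method" (blacklist),
+	// "*{...}" (default rule), and "service/method{...}" (per-service when
+	// method is "*", per-method otherwise).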
+ if config == "" { + return errors.New("empty string is not a valid method binary logging config") + } + + // "-service/method", blacklist, no * or {} allowed. + if config[0] == '-' { + s, m, suffix, err := parseMethodConfigAndSuffix(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if m == "*" { + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") + } + if suffix != "" { + return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") + } + if err := l.setBlacklist(s + "/" + m); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + // "*{h:256;m:256}" + if config[0] == '*' { + hdr, msg, err := parseHeaderMessageLengthConfig(config[1:]) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + return nil + } + + s, m, suffix, err := parseMethodConfigAndSuffix(config) + if err != nil { + return fmt.Errorf("invalid config: %q, %v", config, err) + } + hdr, msg, err := parseHeaderMessageLengthConfig(suffix) + if err != nil { + return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) + } + if m == "*" { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } else { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { + return fmt.Errorf("invalid config: %v", err) + } + } + return nil +} + +const ( + // TODO: this const is only used by env_config now. But could be useful for + // other config. Move to binarylog.go if necessary. + maxUInt = ^uint64(0) + + // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for + // expected output. + longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$` + + // For suffix from above, "{h:123,m:123}". See test for expected output. + optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123". + headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$` + messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$` + headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$` +) + +var ( + longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr) + headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr) + messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr) + headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr) +) + +// Turn "service/method{h;m}" into "service", "method", "{h;m}". +func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) { + // Regexp result: + // + // in: "p.s/m{h:123,m:123}", + // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"}, + match := longMethodConfigRegexp.FindStringSubmatch(c) + if match == nil { + return "", "", "", fmt.Errorf("%q contains invalid substring", c) + } + service = match[1] + method = match[2] + suffix = match[3] + return +} + +// Turn "{h:123;m:345}" into 123, 345. +// +// Return maxUInt if length is unspecified. +func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) { + if c == "" { + return maxUInt, maxUInt, nil + } + // Header config only. 
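+	// A bare "{h}" means headers are logged without a length limit while the
+	// message limit is 0.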
+ if match := headerConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return hdrLenStr, 0, nil + } + return maxUInt, 0, nil + } + + // Message config only. + if match := messageConfigRegexp.FindStringSubmatch(c); match != nil { + if s := match[1]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + return 0, msgLenStr, nil + } + return 0, maxUInt, nil + } + + // Header and message config both. + if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil { + // Both hdr and msg are specified, but one or two of them might be empty. + hdrLenStr = maxUInt + msgLenStr = maxUInt + if s := match[1]; s != "" { + hdrLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + if s := match[2]; s != "" { + msgLenStr, err = strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("failed to convert %q to uint", s) + } + } + return hdrLenStr, msgLenStr, nil + } + return 0, 0, fmt.Errorf("%q contains invalid substring", c) +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go new file mode 100644 index 00000000..96693289 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -0,0 +1,446 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package binarylog + +import ( + "context" + "net" + "strings" + "sync/atomic" + "time" + + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type callIDGenerator struct { + id uint64 +} + +func (g *callIDGenerator) next() uint64 { + id := atomic.AddUint64(&g.id, 1) + return id +} + +// reset is for testing only, and doesn't need to be thread safe. +func (g *callIDGenerator) reset() { + g.id = 0 +} + +var idGen callIDGenerator + +// MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. +type MethodLogger interface { + Log(context.Context, LogEntryConfig) +} + +// TruncatingMethodLogger is a method logger that truncates headers and messages +// based on configured fields. +type TruncatingMethodLogger struct { + headerMaxLen, messageMaxLen uint64 + + callID uint64 + idWithinCallGen *callIDGenerator + + sink Sink // TODO(blog): make this pluggable. +} + +// NewTruncatingMethodLogger returns a new truncating method logger. 
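+// h and m are the header and message length limits; a value of maxUInt
+// disables truncation for that field.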
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+	return &TruncatingMethodLogger{
+		headerMaxLen:  h,
+		messageMaxLen: m,
+
+		callID:          idGen.next(),
+		idWithinCallGen: &callIDGenerator{},
+
+		sink: DefaultSink, // TODO(blog): make it pluggable.
+	}
+}
+
+// Build is an internal only method for building the proto message out of the
+// input event. It's made public to enable other libraries to reuse as much
+// logic in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
+	m := c.toProto()
+	timestamp := timestamppb.Now()
+	m.Timestamp = timestamp
+	m.CallId = ml.callID
+	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
+
+	switch pay := m.Payload.(type) {
+	case *binlogpb.GrpcLogEntry_ClientHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
+	case *binlogpb.GrpcLogEntry_ServerHeader:
+		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
+	case *binlogpb.GrpcLogEntry_Message:
+		m.PayloadTruncated = ml.truncateMessage(pay.Message)
+	}
+	return m
+}
+
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
+	ml.sink.Write(ml.Build(c))
+}
+
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
+	if ml.headerMaxLen == maxUInt {
+		return false
+	}
+	var (
+		bytesLimit = ml.headerMaxLen
+		index      int
+	)
+	// At the end of the loop, index will be the first entry where the total
+	// size is greater than the limit:
+	//
+	// len(entry[:index]) <= ml.headerMaxLen && len(entry[:index+1]) > ml.headerMaxLen.
+	for ; index < len(mdPb.Entry); index++ {
+		entry := mdPb.Entry[index]
+		if entry.Key == "grpc-trace-bin" {
+			// "grpc-trace-bin" is a special key. It's kept in the log entry,
+			// but not counted towards the size limit.
+			continue
+		}
+		currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
+		if currentEntryLen > bytesLimit {
+			break
+		}
+		bytesLimit -= currentEntryLen
+	}
+	truncated = index < len(mdPb.Entry)
+	mdPb.Entry = mdPb.Entry[:index]
+	return truncated
+}
+
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
+	if ml.messageMaxLen == maxUInt {
+		return false
+	}
+	if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
+		return false
+	}
+	msgPb.Data = msgPb.Data[:ml.messageMaxLen]
+	return true
+}
+
+// LogEntryConfig represents the configuration for binary log entry.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type LogEntryConfig interface {
+	toProto() *binlogpb.GrpcLogEntry
+}
+
+// ClientHeader configs the binary log entry to be a ClientHeader entry.
+type ClientHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	MethodName   string
+	Authority    string
+	Timeout      time.Duration
+	// PeerAddr is required only when it's on server side.
+	PeerAddr net.Addr
+}
+
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
+	// This function doesn't need to set all the fields (e.g. seq ID); Build,
+	// which is called from Log, will set them when necessary.
+	clientHeader := &binlogpb.ClientHeader{
+		Metadata:   mdToMetadataProto(c.Header),
+		MethodName: c.MethodName,
+		Authority:  c.Authority,
+	}
+	if c.Timeout > 0 {
+		clientHeader.Timeout = durationpb.New(c.Timeout)
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
+			ClientHeader: clientHeader,
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ServerHeader configs the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+	OnClientSide bool
+	Header       metadata.MD
+	// PeerAddr is required only when it's on client side.
+	PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &binlogpb.ServerHeader{
+				Metadata: mdToMetadataProto(c.Header),
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	if c.PeerAddr != nil {
+		ret.Peer = addrToProto(c.PeerAddr)
+	}
+	return ret
+}
+
+// ClientMessage configs the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message any
+}
+
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ServerMessage configs the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+	OnClientSide bool
+	// Message can be a proto.Message or []byte. Other message formats are not
+	// supported.
+	Message any
+}
+
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
+	var (
+		data []byte
+		err  error
+	)
+	if m, ok := c.Message.(proto.Message); ok {
+		data, err = proto.Marshal(m)
+		if err != nil {
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+		}
+	} else if b, ok := c.Message.([]byte); ok {
+		data = b
+	} else {
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+	}
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
+				Length: uint32(len(data)),
+				Data:   data,
+			},
+		},
+	}
+	if c.OnClientSide {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+	} else {
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+	}
+	return ret
+}
+
+// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
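+// Half-close entries carry no payload; only the logger side (client or
+// server) is recorded.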
+type ClientHalfClose struct { + OnClientSide bool +} + +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, + Payload: nil, // No payload here. + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// ServerTrailer configs the binary log entry to be a ServerTrailer entry. +type ServerTrailer struct { + OnClientSide bool + Trailer metadata.MD + // Err is the status error. + Err error + // PeerAddr is required only when it's on client side and the RPC is trailer + // only. + PeerAddr net.Addr +} + +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { + st, ok := status.FromError(c.Err) + if !ok { + grpclogLogger.Info("binarylogging: error in trailer is not a status error") + } + var ( + detailsBytes []byte + err error + ) + stProto := st.Proto() + if stProto != nil && len(stProto.Details) != 0 { + detailsBytes, err = proto.Marshal(stProto) + if err != nil { + grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) + } + } + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ + Metadata: mdToMetadataProto(c.Trailer), + StatusCode: uint32(st.Code()), + StatusMessage: st.Message(), + StatusDetails: detailsBytes, + }, + }, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + if c.PeerAddr != nil { + ret.Peer = addrToProto(c.PeerAddr) + } + return ret +} + +// Cancel configs the binary log entry to be a Cancel entry. +type Cancel struct { + OnClientSide bool +} + +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, + Payload: nil, + } + if c.OnClientSide { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT + } else { + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER + } + return ret +} + +// metadataKeyOmit returns whether the metadata entry with this key should be +// omitted. +func metadataKeyOmit(key string) bool { + switch key { + case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te": + return true + case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users. + return false + } + return strings.HasPrefix(key, "grpc-") +} + +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} + for k, vv := range md { + if metadataKeyOmit(k) { + continue + } + for _, v := range vv { + ret.Entry = append(ret.Entry, + &binlogpb.MetadataEntry{ + Key: k, + Value: []byte(v), + }, + ) + } + } + return ret +} + +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} + switch a := addr.(type) { + case *net.TCPAddr: + if a.IP.To4() != nil { + ret.Type = binlogpb.Address_TYPE_IPV4 + } else if a.IP.To16() != nil { + ret.Type = binlogpb.Address_TYPE_IPV6 + } else { + ret.Type = binlogpb.Address_TYPE_UNKNOWN + // Do not set address and port fields. 
+			break
+		}
+		ret.Address = a.IP.String()
+		ret.IpPort = uint32(a.Port)
+	case *net.UnixAddr:
+		ret.Type = binlogpb.Address_TYPE_UNIX
+		ret.Address = a.String()
+	default:
+		ret.Type = binlogpb.Address_TYPE_UNKNOWN
+	}
+	return ret
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 00000000..9ea598b1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,170 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+	"bufio"
+	"encoding/binary"
+	"io"
+	"sync"
+	"time"
+
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/protobuf/proto"
+)
+
+var (
+	// DefaultSink is the sink where the logs will be written to. It's exported
+	// for the binarylog package to update.
+	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+)
+
+// Sink writes log entries into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid circular dependency.
+type Sink interface {
+	// Write will be called to write the log entry into the sink.
+	//
+	// It should be thread-safe so it can be called in parallel.
+	Write(*binlogpb.GrpcLogEntry) error
+	// Close will be called when the Sink is replaced by a new Sink.
+	Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                       { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// No buffering is done; Close() does not try to close the writer.
+func newWriterSink(w io.Writer) Sink {
+	return &writerSink{out: w}
+}
+
+type writerSink struct {
+	out io.Writer
+}
+
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
+	b, err := proto.Marshal(e)
+	if err != nil {
+		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+		return err
+	}
+	hdr := make([]byte, 4)
+	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+	if _, err := ws.out.Write(hdr); err != nil {
+		return err
+	}
+	if _, err := ws.out.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufferedSink struct {
+	mu             sync.Mutex
+	closer         io.Closer
+	out            Sink          // out is built on buf.
+	buf            *bufio.Writer // buf is kept for flush.
+	flusherStarted bool
+
+	writeTicker *time.Ticker
+	done        chan struct{}
+}
+
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if !fs.flusherStarted {
+		// Start the write loop when Write is called.
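+		// (The flush goroutine is started lazily on the first Write rather
+		// than at construction time, so unused sinks spawn no goroutine.)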
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } + if err := fs.out.Write(e); err != nil { + return err + } + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + done: make(chan struct{}), + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 00000000..11f91668 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,116 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import ( + "errors" + "sync" +) + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan any + closed bool + closing bool + mu sync.Mutex + backlog []any +} + +// NewUnbounded returns a new instance of Unbounded. 
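+//
+// A minimal usage sketch: read from Get(), then call Load() after each
+// successful receive to request the next buffered value.
+//
+//	b := NewUnbounded()
+//	_ = b.Put("update")
+//	v := <-b.Get()
+//	b.Load() // ask for the next buffered item, if any
+//	_ = v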
+func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan any, 1)} +} + +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + +// Put adds t to the unbounded buffer. +func (b *Unbounded) Put(t any) error { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return errBufferClosed + } + if len(b.backlog) == 0 { + select { + case b.c <- t: + return nil + default: + } + } + b.backlog = append(b.backlog, t) + return nil +} + +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } else if b.closing && !b.closed { + close(b.c) + } +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { + return b.c +} + +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closing { + return + } + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go new file mode 100644 index 00000000..d7e9e1d5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" + + "google.golang.org/grpc/connectivity" +) + +// Channel represents a channel within channelz, which includes metrics and +// internal channelz data, such as channelz id, child list, etc. +type Channel struct { + Entity + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + Parent *Channel + trace *ChannelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +// Implemented to make Channel implement the Identifier interface used for +// nesting. 
+func (c *Channel) channelzIdentifier() {}
+
+func (c *Channel) String() string {
+	if c.Parent == nil {
+		return fmt.Sprintf("Channel #%d", c.ID)
+	}
+	return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+	return c.ID
+}
+
+func (c *Channel) SubChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.subChans)
+}
+
+func (c *Channel) NestedChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.nestedChans)
+}
+
+func (c *Channel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return c.trace.copy()
+}
+
+type ChannelMetrics struct {
+	// The current connectivity state of the channel.
+	State atomic.Pointer[connectivity.State]
+	// The target this channel originally tried to connect to. May be absent.
+	Target atomic.Pointer[string]
+	// The number of calls started on the channel.
+	CallsStarted atomic.Int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c. For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+	c.State.Store(o.State.Load())
+	c.Target.Store(o.Target.Load())
+	c.CallsStarted.Store(o.CallsStarted.Load())
+	c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	c.CallsFailed.Store(o.CallsFailed.Load())
+	c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool {
+	oc, ok := o.(*ChannelMetrics)
+	if !ok {
+		return false
+	}
+	if (c.State.Load() == nil) != (oc.State.Load() == nil) {
+		return false
+	}
+	if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
+		return false
+	}
+	if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
+		return false
+	}
+	if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
+		return false
+	}
+	return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
+		c.CallsFailed.Load() == oc.CallsFailed.Load() &&
+		c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
+		c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
+}
+
+func strFromPointer(s *string) string {
+	if s == nil {
+		return ""
+	}
+	return *s
+}
+
+func (c *ChannelMetrics) String() string {
+	return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
+		c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
+	)
+}
+
+func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
+	c := &ChannelMetrics{}
+	c.State.Store(&state)
+	c.Target.Store(&target)
+	c.CallsStarted.Store(started)
+	c.CallsSucceeded.Store(succeeded)
+	c.CallsFailed.Store(failed)
+	c.LastCallStartedTimestamp.Store(timestamp)
+	return c
+}
+
+func (c *Channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *SubChannel:
+		c.subChans[id] = v.RefName
+	case *Channel:
+		c.nestedChans[id] = v.RefName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *Channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) getParentID() int64 {
+	if c.Parent == nil {
+		return -1
+	}
+	return c.Parent.ID
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
+// corresponding grpc object has been invoked, and the channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
+func (c *Channel) deleteSelfFromTree() (deleted bool) {
+	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+		return false
+	}
+	// not top channel
+	if c.Parent != nil {
+		c.Parent.deleteChild(c.ID)
+	}
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
+// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from the map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+	return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+//     parent's child list.
+//  2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+//     will return entry not found error.
+func (c *Channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(c.ID)
+	c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+	return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *Channel) getRefName() string {
+	return c.RefName
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 00000000..64c79195
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,395 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However,
+	// if the child list is not empty, deletion from the database is put on
+	// hold until the last child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before,
+	// and whether the child list is now empty. If both conditions are met, it
+	// deletes self from the database.
+	deleteSelfIfReady()
+	// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
+	getParentID() int64
+	Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking:
+//
+//  1. Methods that acquire the global lock.
+//  2. Methods that may only be called while the global lock is held.
+//
+// A method of the second type must always be called from within a method of
+// the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	channels         map[int64]*Channel
+	subChannels      map[int64]*SubChannel
+	sockets          map[int64]*Socket
+	servers          map[int64]*Server
+}
+
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*Channel),
+		subChannels:      make(map[int64]*SubChannel),
+		sockets:          make(map[int64]*Socket),
+		servers:          make(map[int64]*Server),
+	}
+}
+
+func (c *channelMap) addServer(id int64, s *Server) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.servers[id] = s
+}
+
+func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else if p := c.channels[pid]; p != nil {
+		p.addChild(id, cn)
+	} else {
+		logger.Infof("channel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	if p := c.channels[pid]; p != nil {
+		p.addChild(id, sc)
+	} else {
+		logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSocket(s *Socket) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.sockets[s.ID] = s
+	if s.Parent == nil {
+		logger.Infof("normal socket %d has no parent", s.ID)
+	}
+	s.Parent.(entry).addChild(s.ID, s)
+}
+
+// removeEntry triggers the removal of an entry, which may not immediately
+// delete the entry if it has to wait on the deletion of its children, or
+// until no other entity's channel trace references it. It may lead to a chain of entry
+// deletion. 
For example, deleting the last socket of a gracefully shutting down +// server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.findEntry(id).triggerDelete() +} + +// tracedChannel represents tracing operations which are present on both +// channels and subChannels. +type tracedChannel interface { + getChannelTrace() *ChannelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + if v, ok := c.channels[id]; ok { + return v + } + if v, ok := c.subChannels[id]; ok { + return v + } + if v, ok := c.servers[id]; ok { + return v + } + if v, ok := c.sockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// +// deleteEntry deletes an entry from the channelMap. Before calling this method, +// caller must check this entry is ready to be deleted, i.e removeEntry() has +// been called on it, and no children still exist. +func (c *channelMap) deleteEntry(id int64) entry { + if v, ok := c.sockets[id]; ok { + delete(c.sockets, id) + return v + } + if v, ok := c.subChannels[id]; ok { + delete(c.subChannels, id) + return v + } + if v, ok := c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return v + } + if v, ok := c.servers[id]; ok { + delete(c.servers, id) + return v + } + return &dummyEntry{idNotFound: id} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) { + c.mu.Lock() + defer c.mu.Unlock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + return + } + childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *Channel: + chanType = RefChannel + case *SubChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&traceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var t []*Channel + for _, v := range ids[idx:] { + if len(t) == maxResults { + end = false + break + } + if cn, ok := c.channels[v]; ok { + t = append(t, cn) + } + } + return t, end +} + +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, 
bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + ids := make([]int64, 0, len(c.servers)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var s []*Server + for _, v := range ids[idx:] { + if len(s) == maxResults { + end = false + break + } + if svr, ok := c.servers[v]; ok { + s = append(s, svr) + } + } + return s, end +} + +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + svr, ok := c.servers[id] + if !ok { + // server with id doesn't exist. + return nil, true + } + svrskts := svr.sockets + ids := make([]int64, 0, len(svrskts)) + sks := make([]*Socket, 0, min(len(svrskts), maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + end := true + for _, v := range ids[idx:] { + if len(sks) == maxResults { + end = false + break + } + if ns, ok := c.sockets[v]; ok { + sks = append(sks, ns) + } + } + return sks, end +} + +func (c *channelMap) getChannel(id int64) *Channel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.channels[id] +} + +func (c *channelMap) getSubChannel(id int64) *SubChannel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.subChannels[id] +} + +func (c *channelMap) getSocket(id int64) *Socket { + c.mu.RLock() + defer c.mu.RUnlock() + return c.sockets[id] +} + +func (c *channelMap) getServer(id int64) *Server { + c.mu.RLock() + defer c.mu.RUnlock() + return c.servers[id] +} + +type dummyEntry struct { + // dummyEntry is a fake entry to handle entry not found case. + idNotFound int64 + Entity +} + +func (d *dummyEntry) String() string { + return fmt.Sprintf("non-existent entity #%d", d.idNotFound) +} + +func (d *dummyEntry) ID() int64 { return d.idNotFound } + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race + // condition. For example, there could be a race between ClientConn.Close() + // info being propagated to addrConn and http2Client. ClientConn.Close() + // cancel the context and result in http2Client to error. The error info is + // then caught by transport monitor and before addrConn.tearDown() is called + // in side ClientConn.Close(). Therefore, the addrConn will create a new + // transport. And when registering the new transport in channelz, its parent + // addrConn could have already been torn down and deleted from channelz + // tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. 
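+    //
+    // dummyEntry is a classic null object: findEntry falls back to it whenever
+    // an id is not found, so callers can invoke entry methods unconditionally,
+    // without nil checks. Mutating methods degrade to a log line, and query
+    // methods return zero values.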
+} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// Entity is implemented by all channelz types. +type Entity interface { + isEntity() + fmt.Stringer + id() int64 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go new file mode 100644 index 00000000..078bb812 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -0,0 +1,230 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines internal APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +package channelz + +import ( + "sync/atomic" + "time" + + "google.golang.org/grpc/internal" +) + +var ( + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db = newChannelMap() + // EntriesPerPage defines the number of channelz entries to be shown on a web page. + EntriesPerPage = 50 + curState int32 +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + atomic.StoreInt32(&curState, 1) +} + +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.LoadInt32(&curState) == 1 +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be +// included in the result. The returned slice is up to a length of the arg +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending +// id order. +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) { + return db.getTopChannels(id, maxResults) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of the arg maxResults or +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServers(id int64, maxResults int) ([]*Server, bool) { + return db.getServers(id, maxResults) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetrics, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of the arg maxResults +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order. 
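The paged accessors above all follow one cursor contract: results are sorted by ascending id, the id/startID argument is an inclusive lower bound, and the returned boolean reports whether the listing is complete. A minimal consumer-side sketch of that contract (illustrative only: channelz is an internal package, so code outside the grpc module reaches this data through the channelz service instead):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	channelz.TurnOn()

	cursor := int64(0)
	for {
		// Ask for up to 50 entries with id >= cursor, sorted ascending.
		chans, end := channelz.GetTopChannels(cursor, 50)
		for _, c := range chans {
			fmt.Println("channel id:", c.ID)
		}
		if end || len(chans) == 0 {
			break // the map has been exhausted
		}
		// Resume one past the largest id seen so far.
		cursor = chans[len(chans)-1].ID + 1
	}
}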
+func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + return db.getServerSockets(id, startID, maxResults) +} + +// GetChannel returns the Channel for the channel (identified by id). +func GetChannel(id int64) *Channel { + return db.getChannel(id) +} + +// GetSubChannel returns the SubChannel for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannel { + return db.getSubChannel(id) +} + +// GetSocket returns the Socket for the socket (identified by id). +func GetSocket(id int64) *Socket { + return db.getSocket(id) +} + +// GetServer returns the ServerMetric for the server (identified by id). +func GetServer(id int64) *Server { + return db.getServer(id) +} + +// RegisterChannel registers the given channel c in the channelz database with +// target as its target and reference name, and adds it to the child list of its +// parent. parent == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(parent *Channel, target string) *Channel { + id := IDGen.genID() + + if !IsOn() { + return &Channel{ID: id} + } + + isTopChannel := parent == nil + + cn := &Channel{ + ID: id, + RefName: target, + nestedChans: make(map[int64]string), + subChans: make(map[int64]string), + Parent: parent, + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, + } + cn.ChannelMetrics.Target.Store(&target) + db.addChannel(id, cn, isTopChannel, cn.getParentID()) + return cn +} + +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(parent *Channel, ref string) *SubChannel { + id := IDGen.genID() + sc := &SubChannel{ + ID: id, + RefName: ref, + parent: parent, + } + + if !IsOn() { + return sc + } + + sc.sockets = make(map[int64]string) + sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())} + db.addSubChannel(id, sc, parent.ID) + return sc +} + +// RegisterServer registers the given server s in channelz database. It returns +// the unique channelz tracking id assigned to this server. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(ref string) *Server { + id := IDGen.genID() + if !IsOn() { + return &Server{ID: id} + } + + svr := &Server{ + RefName: ref, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + ID: id, + } + db.addServer(id, svr) + return svr +} + +// RegisterSocket registers the given normal socket s in channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by skt.Parent, which must be set). It returns the unique channelz +// tracking id assigned to this normal socket. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSocket(skt *Socket) *Socket { + skt.ID = IDGen.genID() + if IsOn() { + db.addSocket(skt) + } + return skt +} + +// RemoveEntry removes an entry with unique channelz tracking id to be id from +// channelz database. +// +// If channelz is not turned ON, this function is a no-op. 
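Registration and removal are symmetric: Register* assigns an id and links the entity under its parent, RemoveEntry requests deletion, and an entity with live children lingers until its last child is gone. A hedged sketch of that lifecycle for a server with one listen socket (names are illustrative; again, this package is only importable from within grpc itself):

package main

import "google.golang.org/grpc/internal/channelz"

func main() {
	channelz.TurnOn()

	// Register a server, then a listen socket as its child. Registration
	// returns the entity with a freshly assigned channelz id.
	srv := channelz.RegisterServer("example-server")
	lis := channelz.RegisterSocket(&channelz.Socket{
		SocketType: channelz.SocketTypeListen,
		Parent:     srv,
		RefName:    "tcp-listener",
	})

	// Removal tolerates either order: a parent whose removal has been
	// requested is only deleted once its last child is gone.
	channelz.RemoveEntry(lis.ID)
	channelz.RemoveEntry(srv.ID)
}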
+func RemoveEntry(id int64) { + if !IsOn() { + return + } + db.removeEntry(id) +} + +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { + id int64 +} + +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *IDGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} + +// Identifier is an opaque channelz identifier used to expose channelz symbols +// outside of grpc. Currently only implemented by Channel since no other +// types require exposure outside grpc. +type Identifier interface { + Entity + channelzIdentifier() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 00000000..ee4d7212 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("channelz") + +// Info logs and adds a trace event if channelz is on. +func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) +} + +// Infof logs and adds a trace event if channelz is on. +func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) +} + +// Warning logs and adds a trace event if channelz is on. +func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) +} + +// Warningf logs and adds a trace event if channelz is on. +func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) +} + +// Error logs and adds a trace event if channelz is on. +func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) +} + +// Errorf logs and adds a trace event if channelz is on. +func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go new file mode 100644 index 00000000..cdfc49d6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+    "fmt"
+    "sync/atomic"
+)
+
+// Server is the channelz representation of a server.
+type Server struct {
+    Entity
+    ID      int64
+    RefName string
+
+    ServerMetrics ServerMetrics
+
+    closeCalled   bool
+    sockets       map[int64]string
+    listenSockets map[int64]string
+    cm            *channelMap
+}
+
+// ServerMetrics defines a struct containing metrics for servers.
+type ServerMetrics struct {
+    // The number of incoming calls started on the server.
+    CallsStarted atomic.Int64
+    // The number of incoming calls that have completed with an OK status.
+    CallsSucceeded atomic.Int64
+    // The number of incoming calls that have completed with a non-OK status.
+    CallsFailed atomic.Int64
+    // The last time a call was started on the server.
+    LastCallStartedTimestamp atomic.Int64
+}
+
+// NewServerMetricsForTesting returns an initialized ServerMetrics.
+func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
+    sm := &ServerMetrics{}
+    sm.CallsStarted.Store(started)
+    sm.CallsSucceeded.Store(succeeded)
+    sm.CallsFailed.Store(failed)
+    sm.LastCallStartedTimestamp.Store(timestamp)
+    return sm
+}
+
+// CopyFrom copies the metric values from o into sm.
+func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
+    sm.CallsStarted.Store(o.CallsStarted.Load())
+    sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
+    sm.CallsFailed.Store(o.CallsFailed.Load())
+    sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// ListenSockets returns the listening sockets for s.
+func (s *Server) ListenSockets() map[int64]string {
+    db.mu.RLock()
+    defer db.mu.RUnlock()
+    return copyMap(s.listenSockets)
+}
+
+// String returns a printable description of s.
+func (s *Server) String() string {
+    return fmt.Sprintf("Server #%d", s.ID)
+}
+
+func (s *Server) id() int64 {
+    return s.ID
+}
+
+func (s *Server) addChild(id int64, e entry) {
+    switch v := e.(type) {
+    case *Socket:
+        switch v.SocketType {
+        case SocketTypeNormal:
+            s.sockets[id] = v.RefName
+        case SocketTypeListen:
+            s.listenSockets[id] = v.RefName
+        }
+    default:
+        logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+    }
+}
+
+func (s *Server) deleteChild(id int64) {
+    delete(s.sockets, id)
+    delete(s.listenSockets, id)
+    s.deleteSelfIfReady()
+}
+
+func (s *Server) triggerDelete() {
+    s.closeCalled = true
+    s.deleteSelfIfReady()
+}
+
+func (s *Server) deleteSelfIfReady() {
+    if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+        return
+    }
+    s.cm.deleteEntry(s.ID)
+}
+
+func (s *Server) getParentID() int64 {
+    return 0
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
new file mode 100644
index 00000000..fa64834b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/credentials" +) + +// SocketMetrics defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketMetrics struct { + // The number of streams that have been started. + StreamsStarted atomic.Int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded atomic.Int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed atomic.Int64 + // The number of messages successfully sent on this socket. + MessagesSent atomic.Int64 + MessagesReceived atomic.Int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent atomic.Int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp atomic.Int64 + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp atomic.Int64 + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp atomic.Int64 + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp atomic.Int64 +} + +// EphemeralSocketMetrics are metrics that change rapidly and are tracked +// outside of channelz. +type EphemeralSocketMetrics struct { + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 +} + +type SocketType string + +const ( + SocketTypeNormal = "NormalSocket" + SocketTypeListen = "ListenSocket" +) + +type Socket struct { + Entity + SocketType SocketType + ID int64 + Parent Entity + cm *channelMap + SocketMetrics SocketMetrics + EphemeralMetrics func() *EphemeralSocketMetrics + + RefName string + // The locally bound address. Immutable. + LocalAddr net.Addr + // The remote bound address. May be absent. Immutable. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. Immutable. + RemoteName string + // Immutable. + SocketOptions *SocketOptionData + // Immutable. 
+ Security credentials.ChannelzSecurityValue +} + +func (ls *Socket) String() string { + return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) +} + +func (ls *Socket) id() int64 { + return ls.ID +} + +func (ls *Socket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *Socket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *Socket) triggerDelete() { + ls.cm.deleteEntry(ls.ID) + ls.Parent.(entry).deleteChild(ls.ID) +} + +func (ls *Socket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *Socket) getParentID() int64 { + return ls.Parent.id() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go new file mode 100644 index 00000000..3b88e4cb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -0,0 +1,151 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// SubChannel is the channelz representation of a subchannel. +type SubChannel struct { + Entity + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + closeCalled bool + sockets map[int64]string + parent *Channel + trace *ChannelTrace + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +func (sc *SubChannel) String() string { + return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID) +} + +func (sc *SubChannel) id() int64 { + return sc.ID +} + +func (sc *SubChannel) Sockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(sc.sockets) +} + +func (sc *SubChannel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return sc.trace.copy() +} + +func (sc *SubChannel) addChild(id int64, e entry) { + if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal { + sc.sockets[id] = v.RefName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *SubChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) getParentID() int64 { + return sc.parent.ID +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. 
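The two-step teardown described above (detach from the parent's child list, then drop the map entry once no trace references remain) is shared by channels, subchannels and servers. Distilled into a self-contained sketch with hypothetical node and registry types, none of which exist in this package:

package main

import "fmt"

// node is a hypothetical, simplified stand-in for a channelz entity: deletion
// splits into (1) detaching from the parent once close has been requested and
// no children remain, and (2) dropping the lookup-map entry once nothing
// (e.g. a trace event) still references it.
type node struct {
	id          int64
	closeCalled bool
	children    map[int64]*node
	traceRefs   int
	parent      *node
	registry    map[int64]*node
}

func (n *node) deleteSelfFromTree() bool {
	if !n.closeCalled || len(n.children) != 0 {
		return false
	}
	if n.parent != nil {
		delete(n.parent.children, n.id)
	}
	return true
}

func (n *node) deleteSelfIfReady() {
	if !n.deleteSelfFromTree() {
		return // still open, or children remain
	}
	if n.traceRefs != 0 {
		return // a trace still references us; stay queryable by id
	}
	delete(n.registry, n.id)
}

func main() {
	reg := map[int64]*node{}
	n := &node{id: 1, children: map[int64]*node{}, registry: reg}
	reg[1] = n
	n.closeCalled = true
	n.deleteSelfIfReady()
	fmt.Println(len(reg)) // 0: no children, no trace references
}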
+func (sc *SubChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.parent.deleteChild(sc.ID) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *SubChannel) deleteSelfFromMap() (delete bool) { + return sc.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *SubChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + db.deleteEntry(sc.ID) + sc.trace.clear() +} + +func (sc *SubChannel) getChannelTrace() *ChannelTrace { + return sc.trace +} + +func (sc *SubChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *SubChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *SubChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *SubChannel) getRefName() string { + return sc.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go new file mode 100644 index 00000000..5ac73ff8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). 
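Getsockopt is shaped as a callback for the standard library's syscall.RawConn.Control, which keeps the file descriptor valid for the duration of the call. A standalone sketch of that pattern (the dialed address is illustrative):

package main

import (
	"fmt"
	"net"
	"syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer conn.Close()

	// Most net.Conn implementations also implement syscall.Conn, which hands
	// out a RawConn; wrappers that merely embed net.Conn do not.
	sc, ok := conn.(syscall.Conn)
	if !ok {
		return
	}
	raw, err := sc.SyscallConn()
	if err != nil {
		return
	}
	// Control runs the callback with the raw fd; the runtime guarantees the
	// fd is not closed or reused while the callback executes.
	if err := raw.Control(func(fd uintptr) {
		fmt.Println("raw fd:", fd)
	}); err != nil {
		fmt.Println(err)
	}
}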
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+    if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
+        s.Linger = v
+    }
+    if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
+        s.RecvTimeout = v
+    }
+    if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
+        s.SendTimeout = v
+    }
+    if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
+        s.TCPInfo = v
+    }
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket any) *SocketOptionData {
+    c, ok := socket.(syscall.Conn)
+    if !ok {
+        return nil
+    }
+    data := &SocketOptionData{}
+    if rawConn, err := c.SyscallConn(); err == nil {
+        rawConn.Control(data.Getsockopt)
+        return data
+    }
+    return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
new file mode 100644
index 00000000..0e6e18e1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -0,0 +1,47 @@
+//go:build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+    "sync"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
+func (s *SocketOptionData) Getsockopt(uintptr) {
+    once.Do(func() {
+        logger.Warning("Channelz: socket options are not supported on non-linux environments")
+    })
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(any) *SocketOptionData {
+    return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
new file mode 100644
index 00000000..36b86740
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -0,0 +1,204 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package channelz + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var maxTraceEntry = defaultMaxTraceEntry + +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e. +// channel/subchannel). Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per +// entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// traceEvent is an internal representation of a single trace event +type traceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// TraceEvent is what the caller of AddTraceEvent should provide to describe the +// event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEvent struct { + Desc string + Severity Severity + Parent *TraceEvent +} + +type ChannelTrace struct { + cm *channelMap + clearCalled bool + CreationTime time.Time + EventNum int64 + mu sync.Mutex + Events []*traceEvent +} + +func (c *ChannelTrace) copy() *ChannelTrace { + return &ChannelTrace{ + CreationTime: c.CreationTime, + EventNum: c.EventNum, + Events: append(([]*traceEvent)(nil), c.Events...), + } +} + +func (c *ChannelTrace) append(e *traceEvent) { + c.mu.Lock() + if len(c.Events) == getMaxTraceEntry() { + del := c.Events[0] + c.Events = c.Events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.Events = append(c.Events, e) + c.EventNum++ + c.mu.Unlock() +} + +func (c *ChannelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.Events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. 
+ CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { + // Log only the trace description associated with the bottom most entity. + d := fmt.Sprintf("[%s]%s", e, desc.Desc) + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d) + case CtWarning: + l.WarningDepth(depth+1, d) + case CtError: + l.ErrorDepth(depth+1, d) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.traceEvent(e.id(), desc) + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go new file mode 100644 index 00000000..9deee7f6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -0,0 +1,49 @@ +/* + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) any { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) any { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. 
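The unexported struct types used as context keys here are the standard way to make context values collision-proof: no other package can construct the key type, so nothing can overwrite or forge the value. A minimal sketch of the same pattern with a hypothetical myInfoKey (none of these names exist in the package):

package main

import (
	"context"
	"fmt"
)

// myInfoKey is an unexported empty struct; only this package can build the
// key, so only this package can set or read the associated value.
type myInfoKey struct{}

func withInfo(ctx context.Context, info string) context.Context {
	return context.WithValue(ctx, myInfoKey{}, info)
}

func infoFrom(ctx context.Context) (string, bool) {
	v, ok := ctx.Value(myInfoKey{}).(string)
	return v, ok
}

func main() {
	ctx := withInfo(context.Background(), "rpc-metadata")
	if info, ok := infoFrom(ctx); ok {
		fmt.Println(info)
	}
}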
+func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 00000000..25ade623 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. + if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 00000000..2919632d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 00000000..f792fd22 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. 
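Putting the two helpers together: clone first (tls.Config holds a sync.Once, which contains a mutex and must not be copied by value once in use), then make sure "h2" is advertised, since HTTP/2, and therefore gRPC, requires ALPN. A sketch, assuming the internal import were available outside the grpc module (it is not; shown only to illustrate the semantics):

package main

import (
	"crypto/tls"
	"fmt"

	icredentials "google.golang.org/grpc/internal/credentials"
)

func main() {
	base := &tls.Config{ServerName: "example.com"}

	// Clone before mutating so the original config is never copied by value.
	cfg := icredentials.CloneTLSConfig(base)

	// Appends "h2" only if it is not already present.
	cfg.NextProtos = icredentials.AppendH2ToNextProtos(cfg.NextProtos)
	fmt.Println(cfg.NextProtos) // [h2]
}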
+func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 00000000..452985f8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strconv" + "strings" +) + +var ( + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) + // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled + // should be rejected. The HTTP/2 protocol requires ALPN to be enabled, this + // option is present for backward compatibility. This option may be overridden + // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" + // or "false". + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) +) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". 
+ return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 00000000..dd314cfb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 00000000..29f234ac --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. Do not use this and read from env directly. Its value is read + // and kept in variable XDSBootstrapFileContent. 
+ // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" +) + +var ( + // XDSBootstrapFileName holds the name of the file which contains xDS + // bootstrap configuration. Users can specify the location of the bootstrap + // file by setting the environment variable "GRPC_XDS_BOOTSTRAP". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv) + // XDSBootstrapFileContent holds the content of the xDS bootstrap + // configuration. Users can specify the bootstrap config by setting the + // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG". + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) + + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") +) diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1..7617be21 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de..092ad187 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. 
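After this refactor, PrefixLogger delegates to the public grpclog package instead of package-local helpers. A usage sketch, assuming the internal import were available (the component name and prefix are illustrative):

package main

import (
	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

func main() {
	// Component loggers satisfy grpclog.DepthLoggerV2; PrefixLogger stamps
	// every line with a fixed prefix, typically an entity identifier.
	logger := internalgrpclog.NewPrefixLogger(grpclog.Component("example"), "[example-component] ")
	logger.Infof("starting with %d workers", 4)

	// Guard expensive verbose logging behind the configured verbosity level.
	if logger.V(2) {
		logger.Infof("verbose detail")
	}
}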
@@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. @@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 00000000..19b9d639 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + done chan struct{} + + callbacks *buffer.Unbounded +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. 
Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + cs := &CallbackSerializer{ + done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go cs.run(ctx) + return cs +} + +// TrySchedule tries to schedules the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } +} + +func (cs *CallbackSerializer) run(ctx context.Context) { + defer close(cs.done) + + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case cb := <-cs.callbacks.Get(): + cs.callbacks.Load() + cb.(func(context.Context))(ctx) + } + } + + // Close the buffer to prevent new callbacks from being added. + cs.callbacks.Close() + + // Run all pending callbacks. + for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) + } +} + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 00000000..fbe697c3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. 
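The CallbackSerializer added above guarantees FIFO execution and drains already-queued callbacks after its context is canceled. An illustrative usage sketch; note that grpcsync is internal to the grpc module, so code outside it cannot actually import this package:

ctx, cancel := context.WithCancel(context.Background())
cs := grpcsync.NewCallbackSerializer(ctx)

// Callbacks run one at a time, in scheduling order.
cs.TrySchedule(func(ctx context.Context) { fmt.Println("first") })
cs.TrySchedule(func(ctx context.Context) { fmt.Println("second") })

cancel()    // no new callbacks are accepted from here on...
<-cs.Done() // ...but anything already queued runs before Done closes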
+package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 00000000..6635f7bc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 00000000..6d8c2f51 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. 
It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.TrySchedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.TrySchedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 00000000..e8d86698 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. 
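The PubSub added above replays the most recent message to late subscribers and funnels every delivery through a CallbackSerializer, so subscribers observe messages in publication order. A hypothetical usage sketch (internal-only API, stand-in subscriber type):

type stateWatcher struct{}

// OnMessage must not block; hand heavy work off to another goroutine.
func (stateWatcher) OnMessage(msg any) { fmt.Println("connectivity:", msg) }

ctx, cancel := context.WithCancel(context.Background())
ps := grpcsync.NewPubSub(ctx)

unsub := ps.Subscribe(stateWatcher{}) // replays the latest message, if any
ps.Publish("READY")                   // delivered asynchronously, in order

unsub()     // stop receiving further messages
cancel()    // shut the PubSub down
<-ps.Done() // wait for all queued deliveries to finish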
+func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 00000000..b25b0bae --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 00000000..e2f948e8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. 
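EncodeDuration above chooses the coarsest unit that keeps the numeric part within eight digits (maxTimeoutValue), as the grpc-timeout header requires. A few concrete values, derived by tracing the function (hypothetical calls; grpcutil is internal to grpc):

fmt.Println(grpcutil.EncodeDuration(0))                      // "0n": non-positive durations clamp to zero
fmt.Println(grpcutil.EncodeDuration(250 * time.Microsecond)) // "250000n": 250,000 ns fits in 8 digits
fmt.Println(grpcutil.EncodeDuration(10 * time.Second))       // "10000000u": 1e10 ns overflows, 1e7 µs fits
fmt.Println(grpcutil.EncodeDuration(2 * time.Hour))          // "7200000m": µs would need 7.2e9, ms fits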
+package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 00000000..6f22bd89 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. +func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 00000000..ec62b477 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". 
A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 00000000..7a092b2b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 00000000..fe49cb74 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
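ParseMethod, ContentSubtype, and ContentType above together normalize the method and content-type strings gRPC puts on the wire. Tracing them on a few inputs (hypothetical calls against this internal package):

svc, m, _ := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
// svc == "helloworld.Greeter", m == "SayHello"

_, _, err := grpcutil.ParseMethod("helloworld.Greeter/SayHello")
// err: invalid method name: should start with /

fmt.Println(grpcutil.ContentSubtype("application/grpc"))       // "", true
fmt.Println(grpcutil.ContentSubtype("application/grpc+proto")) // "proto", true
fmt.Println(grpcutil.ContentSubtype("application/json"))       // "", false
fmt.Println(grpcutil.ContentType("proto"))                     // "application/grpc+proto"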
+ * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. + if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. 
+ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. +func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. 
+ atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. + // + // In any case, there is nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("failed to exit idle mode: %w", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.resetIdleTimerLocked(m.timeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *Manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *Manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *Manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } + m.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5d665398..7aae9240 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -183,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -203,11 +203,31 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false + UserSetDefaultScheme = false // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n // is the number of elements. swap swaps the elements with indexes i and j. ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. 
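The idle Manager above tracks RPC activity with atomics; the channel enters idle only when the timer fires with no calls in flight and no activity since the last check. A compressed sketch of the lifecycle its caller (grpc.ClientConn) drives, with a hypothetical Enforcer and assuming access to this internal package:

type enforcer struct{}

func (enforcer) ExitIdleMode() error { fmt.Println("exiting idle"); return nil }
func (enforcer) EnterIdleMode()      { fmt.Println("entering idle") }

m := idle.NewManager(enforcer{}, 5*time.Minute) // a Manager begins in idle mode

// Every RPC brackets itself with OnCallBegin/OnCallEnd; the first call
// after idleness triggers ExitIdleMode and re-arms the idle timer.
if err := m.OnCallBegin(); err == nil {
	// ... issue the RPC ...
	m.OnCallEnd()
}
m.Close() // stops the timer; subsequent events are ignored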
+ ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 00000000..900bfb71 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. +package metadata + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +type mdKeyType string + +const mdKey = mdKeyType("grpc.internal.address.metadata") + +type mdValue metadata.MD + +func (m mdValue) Equal(o any) bool { + om, ok := o.(mdValue) + if !ok { + return false + } + if len(m) != len(om) { + return false + } + for k, v := range m { + ov := om[k] + if len(ov) != len(v) { + return false + } + for i, ve := range v { + if ov[i] != ve { + return false + } + } + } + return true +} + +// Get returns the metadata of addr. +func Get(addr resolver.Address) metadata.MD { + attrs := addr.Attributes + if attrs == nil { + return nil + } + md, _ := attrs.Value(mdKey).(mdValue) + return metadata.MD(md) +} + +// Set sets (overrides) the metadata in addr. +// +// When a SubConn is created with this address, the RPCs sent on it will all +// have this metadata. +func Set(addr resolver.Address, md metadata.MD) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) + return addr +} + +// Validate validates every pair in md with ValidatePair. 
+func Validate(md metadata.MD) error { + for k, vals := range md { + if err := ValidatePair(k, vals...); err != nil { + return err + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} + +// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : +// +// - key must contain one or more characters. +// - the characters in the key must be contained in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. +// - the characters in the every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + // key should not be empty + if key == "" { + return fmt.Errorf("there is an empty key in the header") + } + // pseudo-header will be ignored + if key[0] == ':' { + return nil + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(key); i++ { + r := key[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) + } + } + if strings.HasSuffix(key, "-bin") { + return nil + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key) + } + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 00000000..dbee7a60 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,73 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/protoadapt" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e any) string { + if ee, ok := e.(protoadapt.MessageV1); ok { + e = protoadapt.MessageV2Of(ee) + } + + if ee, ok := e.(protoadapt.MessageV2); ok { + mm := protojson.MarshalOptions{ + Indent: jsonIndent, + Multiline: true, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. 
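Validate and ValidatePair above enforce HTTP/2 header rules on user metadata. Tracing a few cases (hypothetical import aliases: imetadata for this internal package, metadata for google.golang.org/grpc/metadata):

fmt.Println(imetadata.Validate(metadata.MD{"user-agent": {"my-client/1.0"}}))
// <nil>: lowercase key, printable values

fmt.Println(imetadata.Validate(metadata.MD{"User-Agent": {"x"}}))
// error: uppercase characters fall outside [0-9 a-z _ - .]

fmt.Println(imetadata.Validate(metadata.MD{"trace-bin": {"\x00\xff"}}))
// <nil>: values under "-bin" keys are exempt from the printability check

fmt.Println(imetadata.Validate(metadata.MD{":authority": {"example.com"}}))
// <nil>: pseudo-headers are skipped entirely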
+ return fmt.Sprintf("%+v", ee) + } + return string(ret) + } + + ret, err := json.MarshalIndent(e, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", e) + } + return string(ret) +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 00000000..f0603871 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. + // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. 
+ CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. +type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. +type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. 
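A ConfigSelector implementation only needs to map an RPCInfo to an RPCConfig. A minimal hedged sketch (hypothetical type; iresolver aliases this internal resolver package, serviceconfig is grpc's internal/serviceconfig):

type fixedSelector struct {
	mc serviceconfig.MethodConfig
}

// SelectConfig returns the same method config for every RPC and leaves the
// caller's context untouched.
func (s fixedSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{Context: ri.Context, MethodConfig: s.mc}, nil
}

// A name resolver would then attach it to the state it reports:
//   state = iresolver.SetConfigSelector(state, fixedSelector{mc: mc})
//   cc.UpdateState(state)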
+func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 00000000..4552db16 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,458 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/resolver/dns/internal" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB + // addresses from SRV records. Must not be changed after init time. + EnableSRVLookups = false + + // MinResolutionInterval is the minimum interval at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionInterval = 30 * time.Second + + // ResolvingTimeout specifies the maximum duration for a DNS resolution request. + // If the timeout expires before a response is received, the request will be canceled. + // + // It is recommended to set this value at application startup. Avoid modifying this variable + // after initialization as it's not thread-safe for concurrent modification. 
+ ResolvingTimeout = 30 * time.Second + + logger = grpclog.Component("dns") +) + +func init() { + resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.TimeNowFunc = time.Now + internal.TimeUntilFunc = time.Until + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, address) + } +} + +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: internal.AddressDialer(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint(), defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver internal.NetResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. 
If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + backoffIndex := 1 + for { + state, err := d.lookup() + if err != nil { + // Report error to the underlying grpc.ClientConn. + d.cc.ReportError(err) + } else { + err = d.cc.UpdateState(*state) + } + + var nextResolutionTime time.Time + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. + backoffIndex = 1 + nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval) + select { + case <-d.ctx.Done(): + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. + nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } + select { + case <-d.ctx.Done(): + return + case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)): + } + } +} + +func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +func handleDNSError(err error, lookupType string) error { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. 
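lookupTXT above concatenates every string of the _grpc_config.<host> TXT record and requires the grpc_config= prefix; canaryingSC, later in this file, then picks the first matching choice. An illustrative TXT value (one string, shown wrapped, with hypothetical policy names):

grpc_config=[
  {"clientLanguage": ["GO"], "percentage": 25,
   "serviceConfig": {"loadBalancingPolicy": "round_robin"}},
  {"serviceConfig": {"loadBalancingPolicy": "pick_first"}}
]

A Go client that lands in the 25% canary bucket gets round_robin; every other client falls through to the second, unconditional choice.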
+ if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service + // config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout) + defer cancel() + srv, srvErr := d.lookupSRV(ctx) + addrs, hostErr := d.lookupHost(ctx) + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT(ctx) + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", internal.ErrMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", internal.ErrEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. 
+ host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return rand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 00000000..c0eae4f5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overridden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. 
"::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. + TimeAfterFunc func(time.Duration) <-chan time.Time + + // TimeNowFunc is used by the DNS resolver to get the current time. + // In non-test code, this is implemented by time.Now. In test code, + // this can be used to control the current time for the resolver. + TimeNowFunc func() time.Time + + // TimeUntilFunc is used by the DNS resolver to calculate the remaining + // wait time for re-resolution. In non-test code, this is implemented by + // time.Until. In test code, this can be used to control the remaining + // time for resolver to wait for re-resolution. + TimeUntilFunc func(time.Time) time.Duration + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 00000000..b901c7ba --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
+package passthrough + +import ( + "errors" + + "google.golang.org/grpc/resolver" +) + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) +} + +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 00000000..27cd81af --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,78 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. + endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. 
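+ // (Editorial example: a target such as "unix-abstract:server-name" thus
+ // yields the address "@server-name", which Go dials in the abstract
+ // socket namespace.)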
+ addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 00000000..11d82afc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. 
+ const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 00000000..51e733e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). +func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. 
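+ // For example, BalancerConfig{Name: "round_robin"} with a nil Config
+ // marshals to `[{"round_robin": {}}]` (illustrative policy name).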
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. 
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+ // RetryPolicy configures retry options for the method.
+ RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+ // MaxAttempts is the maximum number of attempts, including the original RPC.
+ //
+ // This field is required and must be two or greater.
+ MaxAttempts int
+
+ // Exponential backoff parameters. The initial retry attempt will occur at
+ // random(0, initialBackoff). In general, the nth attempt will occur at
+ // random(0,
+ // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+ //
+ // These fields are required and must be greater than zero.
+ InitialBackoff time.Duration
+ MaxBackoff time.Duration
+ BackoffMultiplier float64
+
+ // The set of status codes which may be retried.
+ //
+ // Status codes are specified as strings, e.g., "UNAVAILABLE".
+ //
+ // This field is required and must be non-empty.
+ // Note: a set is used to store this for easy lookup.
+ RetryableStatusCodes map[codes.Code]bool
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 00000000..fd33af51
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+ // TelemetryLabels are the telemetry labels to record.
+ TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if none are
+// present.
+func GetLabels(ctx context.Context) *Labels {
+ labels, _ := ctx.Value(labelsKey{}).(*Labels)
+ return labels
+}
+
+// SetLabels sets the Labels in the context. Any Labels already present are
+// shadowed rather than merged.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+ return context.WithValue(ctx, labelsKey{}, labels)
+}
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 00000000..be110d41
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// A Record call panics if the number of label values provided does not match
+// the metric's number of label keys (see verifyLabels).
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metrics recorder list from the
+// provided stats handlers which implement the MetricsRecorder interface.
+// If none of the provided stats handlers implement the MetricsRecorder
+// interface, the returned MetricsRecorderList is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index c7dbc820..75792538 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
 // s.Code() != OK implies that s.Proto() != nil.
 p := s.Proto()
 for _, detail := range details {
- any, err := anypb.New(protoadapt.MessageV2Of(detail))
+ m, err := anypb.New(protoadapt.MessageV2Of(detail))
 if err != nil {
 return nil, err
 }
- p.Details = append(p.Details, any)
+ p.Details = append(p.Details, m)
 }
 return &Status{s: p}, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
new file mode 100644
index 00000000..b3a72276
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -0,0 +1,112 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+ "fmt"
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+ "google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("core")
+
+// GetCPUTime returns how much CPU time has passed since the start of this process.
+func GetCPUTime() int64 {
+ var ts unix.Timespec
+ if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
+ logger.Fatal(err)
+ }
+ return ts.Nano()
+}
+
+// Rusage is an alias for syscall.Rusage under the Linux environment.
+type Rusage = syscall.Rusage
+
+// GetRusage returns the resource usage of the current process.
+func GetRusage() *Rusage {
+ rusage := new(Rusage)
+ syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+ return rusage
+}
+
+// CPUTimeDiff returns the differences in user CPU time and system CPU time
+// used between two Rusage structs.
+func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+ var (
+ utimeDiffs = latest.Utime.Sec - first.Utime.Sec
+ utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+ stimeDiffs = latest.Stime.Sec - first.Stime.Sec
+ stimeDiffus = latest.Stime.Usec - first.Stime.Usec
+ )
+
+ uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
+ sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
+
+ return uTimeElapsed, sTimeElapsed
+}
+
+// SetTCPUserTimeout sets the TCP user timeout on a connection's socket.
+func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+ tcpconn, ok := conn.(*net.TCPConn)
+ if !ok {
+ // Not a TCP connection; exit early.
+ return nil
+ }
+ rawConn, err := tcpconn.SyscallConn()
+ if err != nil {
+ return fmt.Errorf("error getting raw connection: %v", err)
+ }
+ err = rawConn.Control(func(fd uintptr) {
+ err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond))
+ })
+ if err != nil {
+ return fmt.Errorf("error setting option on socket: %v", err)
+ }
+
+ return nil
+}
+
+// GetTCPUserTimeout gets the TCP user timeout on a connection's socket.
+func GetTCPUserTimeout(conn net.Conn) (opt int, err error) {
+ tcpconn, ok := conn.(*net.TCPConn)
+ if !ok {
+ err = fmt.Errorf("conn is not *net.TCPConn. got %T", conn)
+ return
+ }
+ rawConn, err := tcpconn.SyscallConn()
+ if err != nil {
+ err = fmt.Errorf("error getting raw connection: %v", err)
+ return
+ }
+ err = rawConn.Control(func(fd uintptr) {
+ opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT)
+ })
+ if err != nil {
+ err = fmt.Errorf("error getting option on socket: %v", err)
+ return
+ }
+
+ return
+}
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
new file mode 100644
index 00000000..54c24c2f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -0,0 +1,77 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
+package syscall
+
+import (
+ "net"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+var once sync.Once
+var logger = grpclog.Component("core")
+
+func log() {
+ once.Do(func() {
+ logger.Info("CPU time info is unavailable on non-linux environments.")
+ })
+}
+
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
+func GetCPUTime() int64 {
+ log()
+ return 0
+}
+
+// Rusage is an empty struct under non-linux environments.
+type Rusage struct{}
+
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
+ log()
+ return nil
+}
+
+// CPUTimeDiff returns the differences in user CPU time and system CPU time
+// used between two Rusage structs. It is a no-op function under non-linux
+// environments.
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
+ log()
+ return 0, 0
+}
+
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
+ log()
+ return nil
+}
+
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(net.Conn) (int, error) {
+ log()
+ return -1, nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
index 078137b7..7e7aaa54 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
 // combination of unconditionally enabling TCP keepalives here, and
 // disabling the overriding of TCP keepalive parameters by setting the
 // KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
 Control: func(_, _ string, c syscall.RawConn) error {
 return c.Control(func(fd uintptr) {
 unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
index fd7d43a8..d5c1085e 100644
--- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
 // combination of unconditionally enabling TCP keepalives here, and
 // disabling the overriding of TCP keepalive parameters by setting the
 // KeepAlive field to a negative value above, results in OS defaults for
- // the TCP keealive interval and time parameters.
+ // the TCP keepalive interval and time parameters.
 Control: func(_, _ string, c syscall.RawConn) error {
 return c.Control(func(fd uintptr) {
 windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
new file mode 100644
index 00000000..070680ed
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ // bdpLimit is the maximum value the flow control windows will be increased
+ // to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+ // Since this is only a limit, it is safe to make it optimistic.
+ bdpLimit = (1 << 20) * 16
+ // alpha is a constant factor used to keep a moving average
+ // of RTTs.
+ alpha = 0.9
+ // If the current bdp sample is greater than or equal to
+ // our beta * our estimated bdp and the current bandwidth
+ // sample is the maximum bandwidth observed so far, we
+ // increase our bdp estimate by a factor of gamma.
+ beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? +var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. 
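+ // (Editorial note: dividing the sample by 1.5 * rtt therefore converts it
+ // into a bytes-per-second bandwidth estimate, discounting that overshoot.)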
+ if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 00000000..ef72fbb3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,1035 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "net" + "runtime" + "strconv" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/status" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it any + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i any) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() any { + return il.head.it +} + +func (il *itemList) dequeue() any { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. +const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. 
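+ // onWrite is called when the frame is written out (editorial annotation;
+ // see the writeHeader call sites below).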
+ onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + reader mem.Reader + // onEachWrite is called every time + // a part of data is written out. + onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn error // if set, loopyWriter will exit with this error +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. 
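+ // (Editorial sketch: head <-> s1 <-> ... <-> sn <-> tail, where only
+ // s1..sn carry items and the sentinels are never dequeued.)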
+ head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. + mu sync.Mutex + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Pointer[chan struct{}] +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. +func (c *controlBuffer) throttle() { + if ch := c.trfChan.Load(); ch != nil { + select { + case <-(*ch): + case <-c.done: + } + } +} + +// put adds an item to the controlbuf. +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. 
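+//
+// A hypothetical call site, for illustration only (checkStream is not a real
+// function in this package):
+//
+//	sent, err := cbuf.executeAndPut(func() bool { return checkStream() }, frame)
+//
+// A nil error with sent == false means f vetoed the write.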
+func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing + } + if f != nil { + if !f() { // f wasn't successful + return false, nil + } + } + if it == nil { + return true, nil + } + + var wakeUp bool + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. + ch := make(chan struct{}) + c.trfChan.Store(&ch) + } + } + if wakeUp { + select { + case c.wakeupCh <- struct{}{}: + default: + } + } + return true, nil +} + +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. +func (c *controlBuffer) get(block bool) (any, error) { + for { + c.mu.Lock() + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. + c.mu.Unlock() + return frame, err + } + c.consumerWaiting = true + c.mu.Unlock() + + // Release the lock above and wait to be woken up. + select { + case <-c.wakeupCh: + case <-c.done: + return nil, errors.New("transport closed by client") + } + } +} + +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. +func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. +func (c *controlBuffer) finish() { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return + } + c.closed = true + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() + } + } + + // In case throttle() is currently in flight, it needs to be unblocked. + // Otherwise, the transport may not close, since the transport is closed by + // the reader encountering the connection error. + ch := c.trfChan.Swap(nil) + if ch != nil { + close(*ch) + } +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +// Loopy receives frames from the control buffer. 
+// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. +// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resembling a round-robin scheduling over all streams. While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + // estdStreams is map of all established streams that are not cleaned-up yet. + // On client-side, this is all streams whose headers were sent out. + // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally have a list of data items(and perhaps trailers + // on the server-side) to be sent out. + activeStreams *outStreamList + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. + bdpEst *bdpEstimator + draining bool + conn net.Conn + logger *grpclog.PrefixLogger + bufferPool mem.BufferPool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + conn: conn, + logger: logger, + ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes the underlying connection. The connection is always left open to +// allow different closing behavior on the client and server. 
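+//
+// (Editorial note: the transports are assumed to start run in a dedicated
+// goroutine, e.g. `go loopy.run()`, alongside the goroutine that reads frames
+// off the connection.)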
+func (l *loopyWriter) run() (err error) { + defer func() { + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) + } + if !isIOError(err) { + l.framer.writer.Flush() + } + l.cbuf.finish() + }() + for { + it, err := l.cbuf.get(true) + if err != nil { + return err + } + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + gosched := true + hasdata: + for { + it, err := l.cbuf.get(false) + if err != nil { + return err + } + if it != nil { + if err = l.handle(it); err != nil { + return err + } + if _, err = l.processData(); err != nil { + return err + } + continue hasdata + } + isEmpty, err := l.processData() + if err != nil { + return err + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return + } + } +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + l.applySettings(s.ss) + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) registerStreamHandler(h *registerStream) { + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + str, ok := l.estdStreams[h.streamID] + if !ok { + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) + } + return nil + } + // Case 1.A: Server is responding back with headers. + if !h.endStream { + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // else: Case 1.B: Server wants to close stream. + + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 2: Client wants to originate stream. + str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + return l.originateStream(str, h) +} + +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. 
+ hdr.onOrphaned(errStreamDrain) + return nil + } + if err := hdr.initStream(str.id); err != nil { + return err + } + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) + } + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) { + str, ok := l.estdStreams[df.streamID] + if !ok { + return + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { + o.resp <- l.sendQuota +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. 
+ if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("received GOAWAY with no active streams") + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. + if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i any) error { + switch i := i.(type) { + case *incomingWindowUpdate: + l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. + return ErrConnClosing + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } + return nil +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + case http2.SettingHeaderTableSize: + updateHeaderTblSize(l.hEnc, s.Val) + } + } +} + +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() // Remove the first stream. + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. 
As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. + + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. + str.state = waitingOnStreamQuota + return false, nil + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h + } else { + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) + + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } + + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // If this is the last data message on this stream and all of it can be written in this iteration. + if dataItem.endStream && remainingBytes == 0 { + endStream = true + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { + return false, err + } + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + dataItem.h = dataItem.h[hSize:] + + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. 
+ l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 00000000..bc8ee074 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. +var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go new file mode 100644 index 00000000..97198c51 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -0,0 +1,215 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. 
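The quota is deliberately a soft limit: a single large send may drive it negative, and subsequent writers then block until loopy replenishes it. Below is a minimal, self-contained sketch of that acquire/replenish pattern, editorial only; the names are illustrative and it omits the done channel and test hooks that the real writeQuota (shown next) carries.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// softQuota mimics the soft-limit idea: acquire succeeds whenever the
// balance is positive (even if the request drives it negative), and
// blocks otherwise until someone replenishes.
type softQuota struct {
	balance int32
	ch      chan struct{} // signaled when the balance turns positive
}

func (q *softQuota) acquire(n int32) {
	for {
		if atomic.LoadInt32(&q.balance) > 0 {
			atomic.AddInt32(&q.balance, -n) // may go negative: soft limit
			return
		}
		<-q.ch // wait for a replenish
	}
}

func (q *softQuota) replenish(n int32) {
	if atomic.AddInt32(&q.balance, n) > 0 {
		select {
		case q.ch <- struct{}{}: // wake one waiter; buffer of 1 coalesces signals
		default:
		}
	}
}

func main() {
	q := &softQuota{balance: 10, ch: make(chan struct{}, 1)}
	q.acquire(64) // allowed: balance was positive; now -54
	go func() {
		time.Sleep(10 * time.Millisecond)
		q.replenish(100) // balance turns positive, wakes the blocked acquirer
	}()
	q.acquire(1) // blocks until the replenish above
	fmt.Println(atomic.LoadInt32(&q.balance)) // 45
}
```

The buffered channel of size one is the key design choice: replenish never blocks, and redundant wake-ups collapse into a single pending signal.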
+type writeQuota struct {
+	quota int32
+	// get waits on a read from ch when quota goes less than or equal to zero.
+	// replenish writes on it when quota goes positive again.
+	ch chan struct{}
+	// done is triggered in the error case.
+	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back.
+	// It is implemented as a field so that it can be updated
+	// by tests.
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
+
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control.
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which has been received but not yet
+	// consumed by the application.
+	pendingData uint32
+	// The amount of data the application has consumed but for which grpc has
+	// not yet sent a window update. Used to reduce window update frequency.
+	pendingUpdate uint32
+	// delta is the extra window update given by the receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) {
+	f.mu.Lock()
+	f.limit = n
+	f.mu.Unlock()
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message.
Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + return f.delta + } + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go new file mode 100644 index 00000000..ce878693 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -0,0 +1,502 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. 
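Stepping back before the handler-based server transport: the inbound accounting in flowcontrol.go above deliberately batches window updates. Bytes consumed by the application accumulate in pendingUpdate, and a WINDOW_UPDATE increment is only emitted once at least a quarter of the window has been consumed (trInFlow.onData applies the same limit/4 rule at the connection level). A small editorial sketch of just that batching rule, with illustrative names, stripped of the locking and delta handling the real inFlow needs:

```go
package main

import "fmt"

// windowAcct mirrors the quarter-window batching used by inFlow.onRead:
// consumed bytes accumulate, and a window update is only emitted once
// they reach limit/4, reducing WINDOW_UPDATE frame frequency.
type windowAcct struct {
	limit   uint32
	pending uint32
}

// onRead returns the WINDOW_UPDATE increment to send, or 0 to keep batching.
func (w *windowAcct) onRead(n uint32) uint32 {
	w.pending += n
	if w.pending >= w.limit/4 {
		wu := w.pending
		w.pending = 0
		return wu
	}
	return 0
}

func main() {
	w := &windowAcct{limit: 65535} // the default 64KB-1 window from defaults.go
	fmt.Println(w.onRead(8 * 1024))  // 0: below 16383 bytes, keep batching
	fmt.Println(w.onRead(10 * 1024)) // 18432: quarter reached, send the update
}
```

Coalescing updates this way trades a slightly staler sender view of the window for far fewer control frames on the wire.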
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { + if r.Method != http.MethodPost { + w.Header().Set("Allow", http.MethodPost) + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusMethodNotAllowed) + return nil, errors.New(msg) + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) + } + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusHTTPVersionNotSupported) + return nil, errors.New(msg) + } + if _, ok := w.(http.Flusher); !ok { + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) + } + + var localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + peer: p, + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + bufferPool: bufferPool, + } + st.logger = prefixLoggerForServerHandlerTransport(st) + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + peer peer.Peer + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. 
grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats []stats.Handler + logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool +} + +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) +} + +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + s.hdrMu.Lock() + defer s.hdrMu.Unlock() + if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) + } + + if len(s.trailer) > 0 { + for k, vv := range s.trailer { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close(errors.New("finished writing status")) + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + defer data.Free() + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } + ht.rw.(http.Flusher).Flush() + }) + if err != nil { + data.Free() + return err + } + return nil +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + for _, sh := range ht.stats { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { + // With this transport type there will be exactly 1 stream: this HTTP request. + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. 
+ requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close(errors.New("request is done processing")) + }() + + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + req := ht.req + s := &Stream{ + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) + if n > 0 { + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain(string) { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. +// In particular, in can only be: +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return status.Error(code, se.Error()) + } + } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go new file mode 100644 index 00000000..c769deab --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -0,0 +1,1823 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "fmt" + "io" + "math" + "net" + "net/http" + "path/filepath" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/channelz" + icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" + isyscall "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + +var goAwayLoopyWriterTimeout = 5 * time.Second + +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address + md metadata.MD + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + // Do not access controlBuf with mu held. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + perRPCCreds []credentials.PerRPCCredentials + + kp keepalive.ClientParameters + keepaliveEnabled bool + + statsHandlers []stats.Handler + + initialWindowSize int32 + + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + + bdpEst *bdpEstimator + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + registeredCompressors string + + // Do not access controlBuf with mu held. 
+ mu sync.Mutex // guard the following variables + nextID uint32 + state transportState + activeStreams map[uint32]*Stream + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + // goAwayDebugMessage contains a detailed human readable string about a + // GoAway frame, useful for error messages. + goAwayDebugMessage string + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool + + channelz *channelz.Socket + + onClose func(GoAwayReason) + + bufferPool mem.BufferPool + + connectionID uint64 + logger *grpclog.PrefixLogger +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { + address := addr.Addr + networkType, ok := networktype.Get(addr) + if fn != nil { + // Special handling for unix scheme with custom dialer. Back in the day, + // we did not have a unix resolver and therefore targets with a unix + // scheme would end up using the passthrough resolver. So, user's used a + // custom dialer in this case and expected the original dial target to + // be passed to the custom dialer. Now, we have a unix resolver. But if + // a custom dialer is specified, we want to retain the old behavior in + // terms of the address being passed to the custom dialer. + if networkType == "unix" && !strings.HasPrefix(address, "\x00") { + // Supported unix targets are either "unix://absolute-path" or + // "unix:relative-path". + if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. 
+ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) + } + + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + + // The following defer and goroutine monitor the connectCtx for cancellation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandlers: opts.StatsHandlers, + initialWindowSize: initialWindowSize, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + keepaliveEnabled: keepaliveEnabled, + bufferPool: opts.BufferPool, + onClose: onClose, + } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: opts.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }) + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + sh.HandleConn(t.ctx, connBegin) + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err != nil { + // writerDone should be closed since the loopy goroutine + // wouldn't have started in the case this function returns an error. + close(t.writerDone) + t.Close(err) + } + }() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + // Block until the server preface is received successfully or an error occurs. + if err = <-readerErrCh; err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + if err := t.loopy.run(); !isIOError(err) { + // Immediately close the connection, as the loopy writer returns + // when there are no more active streams and we were draining (the + // server sent a GOAWAY). For I/O errors, the reader will hit it + // after draining any remaining incoming data. 
+ t.conn.Close() + } + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. + s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, + } +} + +// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway +// to be the last frame loopy writes to the transport. +func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + return false, g.closeConn +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. 
+ hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + registeredCompressors := t.registeredCompressors + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. + timeout := time.Until(dl) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + var k string + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = strings.ToLower(v) + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. 
+ if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + for k, vv := range t.md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.perRPCCreds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + if len(t.perRPCCreds) == 0 { + return nil, nil + } + authData := map[string]string{} + for _, c := range t.perRPCCreds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + + return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. 
It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { + Err error + + AllowTransparentRetry bool +} + +func (e NewStreamError) Error() string { + return e.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { + ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(uint32) error { + t.mu.Lock() + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { + t.mu.Unlock() + cleanup(ErrConnClosing) + return ErrConnClosing + } + if channelz.IsOn() { + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + transportDrainRequired := false + checkForStreamQuota := func() bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + + hdr.streamID = t.nextID + t.nextID += 2 + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. 
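+ // Client-initiated stream IDs are odd and grow by two per stream
+ // (t.nextID += 2 below), so the ID space of a single connection is
+ // finite: once nextID passes MaxStreamID the transport is drained and
+ // subsequent RPCs go out on a fresh connection that restarts the
+ // odd-ID sequence.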
+ transportDrainRequired = t.nextID > MaxStreamID + + s.id = hdr.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.activeStreams[s.id] = s + t.mu.Unlock() + + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func() bool { + if t.maxSendHeaderListSize == nil { + return true + } + var sz int64 + for _, f := range hdr.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func() bool { + return checkForHeaderListSize() && checkForStreamQuota() + }, hdr) + if err != nil { + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, &NewStreamError{Err: hdrListSizeErr} + } + firstTry = false + select { + case <-ch: + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} + case <-t.goAway: + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} + case <-t.ctx.Done(): + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} + } + } + if len(t.statsHandlers) != 0 { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + } + t.GracefulClose() + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. 
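+ // The CompareAndSwap on headerChanClosed guarantees headerChan is
+ // closed exactly once, even if this cleanup races with a HEADERS frame
+ // being handled in operateHeaders; closing the channel twice would
+ // panic.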
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) + } else { + t.channelz.SocketMetrics.StreamsFailed.Add(1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func() bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed anymore. +func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) + t.mu.Lock() + // Make sure we only close once. + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } + t.cancel() + t.conn.Close() + channelz.RemoveEntry(t.channelz.ID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } + for _, sh := range t.statsHandlers { + connEnd := &stats.ConnEnd{ + Client: true, + } + sh.HandleConn(t.ctx, connEnd) + } +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. 
+func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() + return errStreamDone + } + } else if s.getState() != streamActive { + _ = reader.Close() + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + reader: reader, + } + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() + return err + } + } + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Client) updateFlowControl(n uint32) { + updateIWS := func() bool { + t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. 
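+ // In other words: the transport-level window below is replenished as
+ // soon as bytes arrive off the wire, while each stream's window (see
+ // s.fc.onRead in updateWindow) is only replenished once the
+ // application has actually consumed the data.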
+ // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.StreamEnded() { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancellation. Alter the status code accordingly. + statusCode = codes.DeadlineExceeded + } + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxConcurrentStreams: + maxStreams = new(uint32) + *maxStreams = s.Val + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. 
+ t.streamsQuotaAvailable = make(chan struct{}, 1)
+ }
+ }
+ updateFuncs = append(updateFuncs, updateStreamQuota)
+ }
+ t.controlBuf.executeAndPut(func() bool {
+ for _, f := range updateFuncs {
+ f()
+ }
+ return true
+ }, sf)
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+ if f.IsAck() {
+ // Maybe it's a BDP ping.
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return
+ }
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
+ // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
+ // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
+ // enabled by default and double the configured KEEPALIVE_TIME used for new connections
+ // on that channel.
+ logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
+ }
+ id := f.LastStreamID
+ if id > 0 && id%2 == 0 {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
+ return
+ }
+ // A client can receive multiple GoAways from the server (see
+ // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
+ // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+ // sent after an RTT delay with the ID of the last stream the server will
+ // process.
+ //
+ // Therefore, when we get the first GoAway we don't necessarily close any
+ // streams. In the case of the second GoAway, we close all streams created
+ // after the GoAway ID. This way, streams that were in-flight while the
+ // GoAway from the server was being sent don't get killed.
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways, the first one should always have an ID greater than the following ones.
+ if id > t.prevGoAwayID {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
+ return
+ }
+ default:
+ t.setGoAwayReason(f)
+ close(t.goAway)
+ defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
+ // Notify the clientconn about the GOAWAY before we set the state to
+ // draining, to allow the client to stop attempting to create streams
+ // before disallowing new streams on this connection.
+ if t.state != draining {
+ t.onClose(t.goAwayReason)
+ t.state = draining
+ }
+ }
+ // All streams with IDs greater than the GoAway ID
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+
+ t.prevGoAwayID = id
+ if len(t.activeStreams) == 0 {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
+ return
+ }
+
+ streamsToClose := make([]*Stream, 0)
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ // The stream was unprocessed by the server.
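+ // Marking the stream unprocessed tells the retry layer that nothing
+ // ran on the server, so the RPC is safe to transparently retry on a
+ // new transport; this is the same signal handleRSTStream sets for
+ // REFUSED_STREAM.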
+ atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) + } + } + t.mu.Unlock() + // Called outside t.mu because closeStream can take controlBuf's mu, which + // could induce deadlock and is not allowed. + for _, stream := range streamsToClose { + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutex to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } + if len(f.DebugData()) == 0 { + t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode) + } else { + t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData())) + } +} + +func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason, t.goAwayDebugMessage +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s := t.getStream(frame) + if s == nil { + return + } + endStream := frame.StreamEnded() + atomic.StoreUint32(&s.bytesReceived, 1) + initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0 + + if !initialHeader && !endStream { + // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set. + st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream") + t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false) + return + } + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. 
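+ // A Trailers-Only response is the one case where the very first
+ // HEADERS frame also carries END_STREAM; initialHeader and endStream
+ // are then both true, and closeStream below records the metadata as
+ // trailers rather than headers.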
+ isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. + se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). 
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata + } + close(s.headerChan) + } + } + + for _, sh := range t.statsHandlers { + if !endStream { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + + if !endStream { + return + } + + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) + + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) +} + +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { + frame, err := t.framer.fr.ReadFrame() + if err != nil { + return connectionErrorf(true, err, "error reading server preface: %v", err) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) + } + t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. + if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } + // Transport error. 
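+ // A stream-level error above only tears down the affected stream and
+ // the read loop continues; any other framer error that reaches this
+ // point is treated as fatal for the whole connection.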
+ t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } + } +} + +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { + if channelz.IsOn() { + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). 
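+ // Worked example: with kp.Time = 2h and kp.Timeout = 20s, the first
+ // pass sends a ping and sleeps min(2h, 20s) = 20s, leaving timeoutLeft
+ // at 0; if no read activity is seen by the next tick, the connection
+ // is closed above.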
+ sleepDuration := min(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 00000000..584b50fe --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1475 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" + "google.golang.org/protobuf/proto" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. 
+ ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats []stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelz *channelz.Socket + bufferPool mem.BufferPool + + connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger +} + +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. 
io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + if config.MaxStreams != math.MaxUint32 { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: config.MaxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. 
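+ // The jitter (a small random offset, roughly +/-10% of the configured
+ // age in this implementation) keeps many connections that were opened
+ // together from all reaching MaxConnectionAge, and hence being closed,
+ // at the same instant.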
+ kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + + done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } + t := &http2Server{ + done: done, + conn: conn, + peer: peer, + framer: framer, + readerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), + maxStreams: config.MaxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandlers, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + bufferPool: config.BufferPool, + } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: config.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }, + ) + t.logger = prefixLoggerForServerTransport(t) + + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close(err) + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
+ if err == io.EOF { + return nil, io.EOF + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) + err := t.loopy.run() + close(t.loopyWriterDone) + if !isIOError(err) { + // Close the connection if a non-I/O error occurs (for I/O errors + // the reader will also encounter the error and close). Wait 1 + // second before closing the connection, or when the reader is done + // (i.e. the client already closed the connection or a connection + // error occurred). This avoids the potential problem where there + // is unread data on the receive side of the connection, which, if + // closed, would lead to a TCP RST instead of FIN, and the client + // encountering errors. For more info: + // https://github.com/grpc/grpc-go/issues/5358 + timer := time.NewTimer(time.Second) + defer timer.Stop() + select { + case <-t.readerDone: + case <-timer.C: + } + t.conn.Close() + } + }() + go t.keepalive() + return t, nil +} + +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return nil + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. 
headers frame: %+v", streamID, frame) + } + t.maxStreamID = streamID + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), + } + var ( + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil + } + + if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(ctx) + } + + // Attach the received metadata to the context. 
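+ // grpc-tags-bin and grpc-trace-bin are additionally surfaced to the
+ // stats handlers below; note that when such a header is repeated, only
+ // its last value is used (the [len-1] indexing).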
+ if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return nil + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return nil + } + if httpMethod != http.MethodPost { + t.mu.Unlock() + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + s.cancel() + return nil + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { + t.mu.Unlock() + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + rst: !frame.StreamEnded(), + }) + return nil + } + } + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return nil +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + close(t.readerDone) + <-t.loopyWriterDone + }() + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + t.Close(err) + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if err := t.operateHeaders(ctx, frame, handle); err != nil { + // Any error processing client headers, e.g. invalid stream ID, + // is considered a protocol violation. + t.controlBuf.put(&goAway{ + code: http2.ErrCodeProtocol, + debugData: []byte(err.Error()), + closeConn: err, + }) + continue + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. 
Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) + } + } + if f.StreamEnded() { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func() bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() + return + } + // Maybe it's a BDP ping. 
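+		// Roughly: bdpEstimator sends bdpPing after data arrives, counts the
+		// bytes received until the ack comes back, and uses that sample to
+		// grow the flow-control windows via updateFlowControl.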
+		if t.bdpEst != nil {
+			t.bdpEst.calculate(f.Data)
+		}
+		return
+	}
+	pingAck := &ping{ack: true}
+	copy(pingAck.data[:], f.Data[:])
+	t.controlBuf.put(pingAck)
+
+	now := time.Now()
+	defer func() {
+		t.lastPingAt = now
+	}()
+	// If the ping strikes were reset, we don't need to check for a policy
+	// violation for this ping, and the pingStrikes counter should be set
+	// to 0.
+	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+		t.pingStrikes = 0
+		return
+	}
+	t.mu.Lock()
+	ns := len(t.activeStreams)
+	t.mu.Unlock()
+	if ns < 1 && !t.kep.PermitWithoutStream {
+		// Keepalive shouldn't be active; thus, this new ping should
+		// have come after at least defaultPingTimeout.
+		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+			t.pingStrikes++
+		}
+	} else {
+		// Check if keepalive policy is respected.
+		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+			t.pingStrikes++
+		}
+	}
+
+	if t.pingStrikes > maxPingStrikes {
+		// Send goaway and close the connection.
+		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
+	}
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+	for k, vv := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it any) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			}
+			return false
+		}
+	}
+	return true
+}
+
+func (t *http2Server) streamContextErr(s *Stream) error {
+	select {
+	case <-t.done:
+		return ErrConnClosing
+	default:
+	}
+	return ContextErr(s.ctx.Err())
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+	if s.getState() == streamDone {
+		return t.streamContextErr(s)
+	}
+
+	if s.updateHeaderSent() {
+		return ErrIllegalHeaderWrite
+	}
+
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	if err := t.writeHeaderLocked(s); err != nil {
+		switch e := err.(type) {
+		case ConnectionError:
+			return status.Error(codes.Unavailable, e.Desc)
+		default:
+			return status.Convert(err).Err()
+		}
+	}
+	return nil
+}
+
+func (t *http2Server) setResetPingStrikes() {
+	atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
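+	// For reference, a typical gRPC response header block built below looks
+	// like this on the wire (illustrative values only):
+	//
+	//	:status: 200
+	//	content-type: application/grpc+proto
+	//	grpc-encoding: gzip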
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	hf := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite:   t.setResetPingStrikes,
+	}
+	success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	for _, sh := range t.stats {
+		// Note: Headers are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		outHeader := &stats.OutHeader{
+			Header:      s.header.Copy(),
+			Compression: s.sendCompress,
+		}
+		sh.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+
+	if s.getState() == streamDone {
+		return nil
+	}
+
+	// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
+	// first and create a slice of that exact size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+	if !s.updateHeaderSent() { // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				return err
+			}
+		} else { // Send a trailer only response.
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		// Do not use the user's grpc-status-details-bin (if present) if we are
+		// even attempting to set our own.
+		delete(s.trailer, grpcStatusDetailsBinHeader)
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
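+	// An illustrative trailer block as it leaves this function (example
+	// values; x-trace-id stands in for user trailer metadata, and
+	// grpc-message is percent-encoded by encodeGrpcMessage above):
+	//
+	//	grpc-status: 3
+	//	grpc-message: invalid argument
+	//	x-trace-id: abc123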
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite:   t.setResetPingStrikes,
+	}
+
+	success, err := t.controlBuf.executeAndPut(func() bool {
+		return t.checkForHeaderListSize(trailingHeader)
+	}, nil)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+	for _, sh := range t.stats {
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		sh.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
+	}
+	return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+	reader := data.Reader()
+
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			_ = reader.Close()
+			return err
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			_ = reader.Close()
+			return t.streamContextErr(s)
+		}
+	}
+
+	df := &dataFrame{
+		streamID:    s.id,
+		h:           hdr,
+		reader:      reader,
+		onEachWrite: t.setResetPingStrikes,
+	}
+	if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+		_ = reader.Close()
+		return t.streamContextErr(s)
+	}
+	if err := t.controlBuf.put(df); err != nil {
+		_ = reader.Close()
+		return err
+	}
+	return nil
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
+	defer func() {
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
+	}()
+
+	for {
+		select {
+		case <-idleTimer.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+ t.mu.Unlock() + idleTimer.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.Drain("max_idle") + return + } + idleTimer.Reset(val) + case <-ageTimer.C: + t.Drain("max_age") + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-ageTimer.C: + // Close the connection after grace period. + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") + } + t.controlBuf.put(closeConnection{}) + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout)) + return + } + if !outstandingPing { + if channelz.IsOn() { + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := min(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close(err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) + } + channelz.RemoveEntry(t.channelz.ID) + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) + } else { + t.channelz.SocketMetrics.StreamsFailed.Add(1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. 
+ return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) Drain(debugData string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainEvent != nil { + return + } + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + t.maxStreamMu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + sid := t.maxStreamID + retErr := g.closeConn + if len(t.activeStreams) == 0 { + retErr = errors.New("second GOAWAY written and no active streams left to process") + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + t.framer.writer.Flush() + if retErr != nil { + return false, retErr + } + return true, nil + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case <-t.drainEvent.Done(): + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } +} + +func (t *http2Server) IncrMsgSent() { + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1) +} + +func (t *http2Server) IncrMsgRecv() { + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { + return &peer.Peer{ + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := rand.Int63n(2*r) - r + return time.Duration(j) +} + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func SetConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 00000000..3613d7b6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,468 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bufio" + "encoding/base64" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/codes" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // https://httpwg.org/specs/rfc7540.html#SettingValues + http2InitHeaderTableSize = 4096 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. 
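+		// For example, "AAEC" (length 4) decodes here via StdEncoding, while
+		// an unpadded value such as "AAECAw" (length 6) falls through to
+		// RawStdEncoding below.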
+ return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + fmt.Fprintf(&sb, "%%%02X", b) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + sb.WriteByte(b) + } else { + fmt.Fprintf(&sb, "%%%02X", b) + } + } + msg = msg[size:] + } + return sb.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
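+// For example, "%E4%B8%96" decodes back to the UTF-8 string "世", while a
+// stray '%' that does not start a valid two-digit hex escape is passed
+// through unchanged.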
+func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + sb.WriteByte(c) + } else { + sb.WriteByte(byte(parsed)) + i += 2 + } + } else { + sb.WriteByte(c) + } + } + return sb.String() +} + +type bufWriter struct { + pool *sync.Pool + buf []byte + offset int + batchSize int + conn net.Conn + err error +} + +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ + batchSize: batchSize, + conn: conn, + pool: pool, + } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w +} + +func (w *bufWriter) Write(b []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. + n, err := w.conn.Write(b) + return n, toIOError(err) + } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } + written := 0 + for len(b) > 0 { + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err + } + } + return written, nil +} + +func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) + w.offset = 0 + return w.err +} + +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +var writeBufferPoolMap = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. 
+ f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +func getWriteBufferPool(size int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + +// parseDialTarget returns the network and address to pass to dialer. +func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 00000000..42ed2b07 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 00000000..c11b5278 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. Attribute of a resolver.Address.
+package networktype
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+	address.Attributes = address.Attributes.WithValue(key, networkType)
+	return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+	v := address.Attributes.Value(key)
+	if v == nil {
+		return "", false
+	}
+	return v.(string), true
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
new file mode 100644
index 00000000..54b22443
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -0,0 +1,150 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+
+	"google.golang.org/grpc/internal"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+var (
+	// The following variable will be overwritten in the tests.
+	httpProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+func mapAddress(address string) (*url.URL, error) {
+	req := &http.Request{
+		URL: &url.URL{
+			Scheme: "https",
+			Host:   address,
+		},
+	}
+	url, err := httpProxyFromEnvironment(req)
+	if err != nil {
+		return nil, err
+	}
+	return url, nil
+}
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response
+// and stores those bytes in the buffer.
+// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't
+// lose the bytes in the buffer.
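+// For example, if the proxy coalesces its "200 Connection established"
+// response with the first bytes sent by the backend, those extra bytes end up
+// in the bufio.Reader and must be surfaced through bufConn rather than lost.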
+type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + // The buffer could contain extra bytes from the target server, so we can't + // discard it. However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil +} + +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } + + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) + if err != nil { + return nil, err + } + if proxyURL == nil { + // proxy is disabled if proxyURL is nil. + return conn, err + } + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 00000000..924ba4f3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,934 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const logLevel = 2 + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer mem.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. + return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata. + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last mem.Buffer // Stores the remaining data in the previous calls. + err error +} + +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + if r.last != nil { + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. 
If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.last != nil {
+		buf = r.last
+		if r.last.Len() > n {
+			buf, r.last = mem.SplitUnsafe(buf, n)
+		} else {
+			r.last = nil
+		}
+		return buf, nil
+	}
+	if r.closeStream != nil {
+		buf, r.err = r.readClient(n)
+	} else {
+		buf, r.err = r.read(n)
+	}
+	return buf, r.err
+}
+
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readHeaderAdditional(m, header)
+	}
+}
+
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+	select {
+	case <-r.ctxDone:
+		return nil, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
+	// If the context is canceled, then closes the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readHeaderAdditional(m, header)
+	case m := <-r.recv.get():
+		return r.readHeaderAdditional(m, header)
+	}
+}
+
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
+	// If the context is canceled, then closes the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+ r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readAdditional(m, n) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return 0, m.err + } + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) + } + + return m.buffer, nil +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ct ClientTransport // nil for server side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + doneFunc func() // invoked at the end of stream on client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader *transportReader + fc *inFlow + wq *writeQuota + + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool + headerWireLength int // Only set on server side. + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + // On client side, header keeps the received header metadata. + // + // On server side, header keeps the header set by SetHeader(). The complete + // header will merged into this after t.WriteHeader() is called. + header metadata.MD + trailer metadata.MD // the key-value map of trailer metadata. + + noHeaders bool // set if the client never received headers (set only after the stream is done). + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. 
+ contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was already set. It is valid only on server-side. +func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return + } + select { + case <-s.ctx.Done(): + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan + case <-s.headerChan: + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + s.waitOnHeader() + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() []string { + values := strings.Split(s.clientAdvertisedCompressors, ",") + for i, v := range values { + values[i] = strings.TrimSpace(v) + } + return values +} + +// Done returns a channel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header returns the header metadata of the stream. +// +// On client side, it acquires the key-value pairs of header metadata once it is +// available. It blocks until i) the metadata is ready or ii) there is no header +// metadata or iii) the stream is canceled/expired. +// +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. +func (s *Stream) Header() (metadata.MD, error) { + if s.headerChan == nil { + // On server side, return the header in stream. It will be the out + // header after t.WriteHeader is called. + return s.header.Copy(), nil + } + s.waitOnHeader() + + if !s.headerValid || s.noHeaders { + return nil, s.status.Err() + } + + return s.header.Copy(), nil +} + +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. If the stream ends +// before headers are received, returns true, nil. Client-side only. 
+func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders +} + +// Trailer returns the cached trailer metadata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +// It can be safely read only after stream has ended that is either read +// or write have returned io.EOF. +func (s *Stream) Trailer() metadata.MD { + c := s.trailer.Copy() + return c +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + +// Method returns the method for the stream. +func (s *Stream) Method() string { + return s.method +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, after Done() is closed. +func (s *Stream) Status() *status.Status { + return s.status +} + +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +// This should not be called in parallel to other data writes. +func (s *Stream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *Stream) SendHeader(md metadata.MD) error { + return s.st.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +// This should not be called parallel to other data writes. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
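+//
+// A hedged sketch of how the gRPC layer above consumes ReadHeader/Read to
+// parse the 5-byte message prefix (the real logic lives in the parser in
+// rpc_util.go; error handling elided):
+//
+//	hdr := make([]byte, 5)
+//	if err := s.ReadHeader(hdr); err != nil { ... }
+//	compressed := hdr[0] == 1
+//	length := binary.BigEndian.Uint32(hdr[1:])
+//	msg, err := s.Read(int(length))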
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.er; er != nil {
+		return nil, er
+	}
+	s.requestRead(n)
+	for n != 0 {
+		buf, err := s.trReader.Read(n)
+		var bufLen int
+		if buf != nil {
+			bufLen = buf.Len()
+		}
+		n -= bufLen
+		if n == 0 {
+			err = nil
+		}
+		if err != nil {
+			if bufLen > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			data.Free()
+			return nil, err
+		}
+		data = append(data, buf)
+	}
+	return data, nil
+}
+
+// transportReader reads all the data available for this Stream from the transport and
+// passes them into the decoder, which converts them into a gRPC message stream.
+// The error is io.EOF when the stream is done or another non-nil error if
+// the stream broke.
+type transportReader struct {
+	reader *recvBufferReader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+	n, err := t.reader.ReadHeader(header)
+	if err != nil {
+		t.er = err
+		return 0, err
+	}
+	t.windowHandler(n)
+	return n, nil
+}
+
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+	buf, err := t.reader.Read(n)
+	if err != nil {
+		t.er = err
+		return buf, err
+	}
+	t.windowHandler(buf.Len())
+	return buf, nil
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
+	InTapHandle           tap.ServerInHandle
+	StatsHandlers         []stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	SharedWriteBuffer     bool
+	ChannelzParent        *channelz.Server
+	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
+	BufferPool            mem.BufferPool
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used.
Only one of + // TransportCredentials and CredsBundle is non-nil. + CredsBundle credentials.Bundle + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandlers stores the handler for stats. + StatsHandlers []stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool + // ChannelzParent sets the addrConn id which initiated the creation of this client transport. + ChannelzParent *channelz.SubChannel + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 + // UseProxy specifies if a proxy should be used. + UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set + + DoneFunc func() // called when the stream is finished +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close(err error) + + // GracefulClose starts to tear down the transport: the transport will stop + // accepting new RPCs and NewStream will return error. Once all streams are + // finished, the transport will close. + // + // It does not block. + GracefulClose() + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. 
+	Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in the error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+
+	// GoAway returns a channel that is closed when ClientTransport
+	// receives the draining signal from the server (e.g., GOAWAY frame in
+	// HTTP/2).
+	GoAway() <-chan struct{}
+
+	// GetGoAwayReason returns the reason why GoAway frame was received, along
+	// with a human readable string with debug info.
+	GetGoAwayReason() (GoAwayReason, string)
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+	// HandleStreams receives incoming streams using the given handler.
+	HandleStreams(context.Context, func(*Stream))
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client. WriteStatus is
+	// the final call made on a stream and always occurs.
+	WriteStatus(s *Stream, st *status.Status) error
+
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close(err error)
+
+	// Peer returns the peer of the server transport.
+	Peer() *peer.Peer
+
+	// Drain notifies the client this ServerTransport stops accepting new RPCs.
+	Drain(debugData string)
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this transport.
+	IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+		temp: temp,
+		err:  e,
+	}
+}
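The Error and GoAway contracts in ClientTransport above call for a monitoring goroutine on the caller's side. A hedged sketch of that pattern with stand-in channels (watchTransport and the callbacks are hypothetical names, not code from this patch):

    package main

    import "fmt"

    // watchTransport blocks until the transport reports an I/O error or a
    // server-initiated drain; errCh and goAwayCh stand in for the channels
    // returned by ClientTransport.Error() and GoAway().
    func watchTransport(errCh, goAwayCh <-chan struct{}, onError, onGoAway func()) {
        select {
        case <-errCh:
            onError()
        case <-goAwayCh:
            onGoAway()
        }
    }

    func main() {
        errCh, goAwayCh := make(chan struct{}), make(chan struct{})
        close(goAwayCh) // simulate a GOAWAY from the server
        watchTransport(errCh, goAwayCh,
            func() { fmt.Println("recreate the transport") },
            func() { fmt.Println("drain and reconnect") },
        )
    }
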
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to notify the
+	// application layer of an error.
+	errStreamDone = errors.New("the stream is done")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// ContextErr converts the error from the context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
new file mode 100644
index 00000000..eb42b19f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package keepalive defines configurable parameters for point-to-point
+// healthcheck.
+package keepalive
+
+import (
+	"time"
+)
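The parameter structs that follow are normally supplied through dial and server options rather than constructed directly. A sketch against the public gRPC API, with illustrative values and target:

    package main

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/keepalive"
    )

    func main() {
        // Client: ping after 2 minutes of inactivity, give up 20s later.
        conn, err := grpc.NewClient("localhost:50051", // illustrative target
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithKeepaliveParams(keepalive.ClientParameters{
                Time:    2 * time.Minute,
                Timeout: 20 * time.Second,
            }),
        )
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        // Server: tolerate pings no more often than once a minute and close
        // connections that have sat idle for five minutes.
        _ = grpc.NewServer(
            grpc.KeepaliveParams(keepalive.ServerParameters{
                MaxConnectionIdle: 5 * time.Minute,
            }),
            grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
                MinTime: time.Minute,
            }),
        )
    }

+// ClientParameters is used to set keepalive parameters on the client-side.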
+// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the +// server-side. Server will close connection with a client that violates this +// policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. 
+ // If true, server allows keepalive pings even when there are no active + // streams(RPCs). If false, and client sends ping when there are no active + // streams, server will send GOAWAY and close the connection. + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 00000000..c37c58c0 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. +func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. 
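Before the sizedBufferPool description continues, a quick usage sketch of the tiered pool defined above (mem is an experimental package, and the tier sizes assumed here are the defaults listed earlier):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/mem"
    )

    func main() {
        pool := mem.DefaultBufferPool()

        // A 10KB request lands in the 16KB tier: length 10240, capacity 16384.
        buf := pool.Get(10 << 10)
        fmt.Println(len(*buf), cap(*buf))

        copy(*buf, "payload") // use the buffer ...
        pool.Put(buf)         // ... then hand it back for reuse
    }
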
For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. +func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 00000000..228e9c2f --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. 
This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers such as Ref,
+// Len or CopyTo etc., will panic if any underlying buffers have already been
+// freed. It is recommended to not directly interact with any of the underlying
+// buffers directly, rather such interactions should be mediated through the
+// various methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+	var length int
+	for _, b := range s {
+		length += b.Len()
+	}
+	return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+	for _, b := range s {
+		b.Ref()
+	}
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+	for _, b := range s {
+		b.Free()
+	}
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+	off := 0
+	for _, b := range s {
+		off += copy(dst[off:], b.ReadOnlyData())
+	}
+	return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+	l := s.Len()
+	if l == 0 {
+		return nil
+	}
+	out := make([]byte, l)
+	s.CopyTo(out)
+	return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+	if len(s) == 1 {
+		s[0].Ref()
+		return s[0]
+	}
+	sLen := s.Len()
+	if sLen == 0 {
+		return emptyBuffer{}
+	}
+	buf := pool.Get(sLen)
+	s.CopyTo(*buf)
+	return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+	s.Ref()
+	return &sliceReader{
+		data: s,
+		len:  s.Len(),
+	}
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience
+// method Remaining(), which returns the number of unread bytes remaining in the
+// slice. Buffers will be freed as they are read.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+	// Close frees the underlying BufferSlice and never returns an error. Subsequent
+	// calls to Read will return (0, io.EOF).
+	Close() error
+	// Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. + r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. +func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 00000000..4d66b2cc --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. 
+package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. + Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. +func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. 
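Copy, defined next, pairs naturally with the reference counting above; a small sketch of the lifecycle (the size is chosen to stay above the 1KB pooling threshold so the pooled, reference-counted path is exercised):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/mem"
    )

    func main() {
        pool := mem.DefaultBufferPool()

        // 2KB is above the pooling threshold, so Copy pulls a pooled []byte
        // and the returned Buffer is reference counted, starting at 1.
        buf := mem.Copy(make([]byte, 2048), pool)

        buf.Ref()              // second reference, e.g. for another goroutine
        buf.Free()             // 2 -> 1: data is still readable
        fmt.Println(buf.Len()) // 2048
        buf.Free()             // 1 -> 0: backing array returns to the pool
    }
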
+func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. +func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). +func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 00000000..d2e15253 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,295 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the
+// gRPC library. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := make(MD, len(m))
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func Pairs(kv ...string) MD {
+	if len(kv)%2 == 1 {
+		panic(fmt.Sprintf("metadata: Pairs got an odd number of input pairs for metadata: %d", len(kv)))
+	}
+	md := make(MD, len(kv)/2)
+	for i := 0; i < len(kv); i += 2 {
+		key := strings.ToLower(kv[i])
+		md[key] = append(md[key], kv[i+1])
+	}
+	return md
+}
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+	return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+	out := make(MD, len(md))
+	for k, v := range md {
+		out[k] = copyOf(v)
+	}
+	return out
+}
+
+// Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
+func (md MD) Get(k string) []string {
+	k = strings.ToLower(k)
+	return md[k]
+}
+
+// Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
+func (md MD) Set(k string, vals ...string) {
+	if len(vals) == 0 {
+		return
+	}
+	k = strings.ToLower(k)
+	md[k] = vals
+}
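A round trip through these constructors (keys and values are illustrative):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/metadata"
    )

    func main() {
        // Keys are lowercased on the way in, so these two pairs share a key.
        md := metadata.Pairs("X-Trace-Id", "abc123", "x-trace-id", "def456")
        md = metadata.Join(md, metadata.New(map[string]string{"x-team": "core"}))

        fmt.Println(md.Get("X-TRACE-ID")) // [abc123 def456]; Get lowercases too
        fmt.Println(md.Len())             // 2 distinct keys
    }

+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.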
+func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // Case insensitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. 
+		if strings.EqualFold(k, key) {
+			return copyOf(v)
+		}
+	}
+	return nil
+}
+
+func copyOf(v []string) []string {
+	vals := make([]string, len(v))
+	copy(vals, v)
+	return vals
+}
+
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+//
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, nil, false
+	}
+
+	return raw.md, raw.added, true
+}
+
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromOutgoingContext(ctx context.Context) (MD, bool) {
+	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
+	if !ok {
+		return nil, false
+	}
+
+	mdSize := len(raw.md)
+	for i := range raw.added {
+		mdSize += len(raw.added[i]) / 2
+	}
+
+	out := make(MD, mdSize)
+	for k, v := range raw.md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	for _, added := range raw.added {
+		if len(added)%2 == 1 {
+			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+		}
+
+		for i := 0; i < len(added); i += 2 {
+			key := strings.ToLower(added[i])
+			out[key] = append(out[key], added[i+1])
+		}
+	}
+	return out, ok
+}
+
+type rawMD struct {
+	md    MD
+	added [][]string
+}
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 00000000..499a49c8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,83 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utils.
+package peer
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC, such as the address
+// and authentication information.
+type Peer struct {
+	// Addr is the peer address.
+	Addr net.Addr
+	// LocalAddr is the local address.
+	LocalAddr net.Addr
+	// AuthInfo is the authentication information of the transport.
+	// It is nil if there is no transport security being used.
+	AuthInfo credentials.AuthInfo
+}
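The metadata context helpers above compose as follows; a sketch that simulates the server side by attaching the client's metadata as incoming context (in real code the transport does this with the wire metadata):

    package main

    import (
        "context"
        "fmt"

        "google.golang.org/grpc/metadata"
    )

    func main() {
        // Client side: appended keys are lowercased and merged in order.
        ctx := metadata.AppendToOutgoingContext(context.Background(), "X-Request-Id", "42")
        ctx = metadata.AppendToOutgoingContext(ctx, "x-request-id", "43")

        md, _ := metadata.FromOutgoingContext(ctx)
        fmt.Println(md.Get("x-request-id")) // [42 43]

        // Simulated server side: lookups are case insensitive.
        srvCtx := metadata.NewIncomingContext(context.Background(), md)
        fmt.Println(metadata.ValueFromIncomingContext(srvCtx, "x-request-id"))
    }
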
+// String implements the Stringer interface so that a context carrying a
+// peerKey value prints usefully.
+func (p *Peer) String() string {
+	if p == nil {
+		return "Peer<nil>"
+	}
+	sb := &strings.Builder{}
+	sb.WriteString("Peer{")
+	if p.Addr != nil {
+		fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+	} else {
+		fmt.Fprintf(sb, "Addr: <nil>, ")
+	}
+	if p.LocalAddr != nil {
+		fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+	} else {
+		fmt.Fprintf(sb, "LocalAddr: <nil>, ")
+	}
+	if p.AuthInfo != nil {
+		fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+	} else {
+		fmt.Fprintf(sb, "AuthInfo: <nil>")
+	}
+	sb.WriteString("}")
+
+	return sb.String()
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+	return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 00000000..bdaa2130
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,219 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+	// picker is the picker produced by the LB policy. May be nil if a picker
+	// has never been produced.
+	picker balancer.Picker
+	// blockingCh is closed when the picker has been invalidated because there
+	// is a new one available.
+	blockingCh chan struct{}
+}
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	// If pickerGen holds a nil pointer, the pickerWrapper is closed.
+	pickerGen     atomic.Pointer[pickerGeneration]
+	statsHandlers []stats.Handler // to record blocking picker calls
+}
+
+func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
+	pw := &pickerWrapper{
+		statsHandlers: statsHandlers,
+	}
+	pw.pickerGen.Store(&pickerGeneration{
+		blockingCh: make(chan struct{}),
+	})
+	return pw
+}
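The pickerGeneration/atomic.Pointer arrangement above is a reusable "latest value plus invalidation channel" pattern; a self-contained sketch of it (updatePicker, which follows, is the gRPC instance of the publisher side):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // generation pairs a value with a channel that is closed once a newer
    // generation replaces it, the same shape as pickerGeneration.
    type generation struct {
        val     string
        invalid chan struct{}
    }

    func main() {
        var cur atomic.Pointer[generation]
        cur.Store(&generation{val: "v1", invalid: make(chan struct{})})

        // Publisher: swap in the new generation, then wake every waiter
        // blocked on the old one by closing its channel.
        old := cur.Swap(&generation{val: "v2", invalid: make(chan struct{})})
        close(old.invalid)

        // Waiter: block until the generation it read is invalidated, reload.
        <-old.invalid
        fmt.Println(cur.Load().val) // v2
    }

+// updatePicker is called by UpdateState calls from the LB policy. It
+// unblocks all blocked picks.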
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + old := pw.pickerGen.Swap(&pickerGeneration{ + picker: p, + blockingCh: make(chan struct{}), + }) + close(old.blockingCh) +} + +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac + ac.incrCallsStarted() + done := result.Done + result.Done = func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { + var ch chan struct{} + + var lastPickErr error + + for { + pg := pw.pickerGen.Load() + if pg == nil { + return nil, balancer.PickResult{}, ErrClientConnClosing + } + if pg.picker == nil { + ch = pg.blockingCh + } + if ch == pg.blockingCh { + // This could happen when either: + // - pw.picker is nil (the previous if condition), or + // - we have already called pick on the current picker. + select { + case <-ctx.Done(): + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) + } + case <-ch: + } + continue + } + + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + + ch = pg.blockingCh + p := pg.picker + + pickResult, err := p.Pick(info) + if err != nil { + if err == balancer.ErrNoSubConnAvailable { + continue + } + if st, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. 
+ if !failfast { + lastPickErr = err + continue + } + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + } + + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) + continue + } + if t := acbw.ac.getReadyTransport(); t != nil { + if channelz.IsOn() { + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil + } + return t, pickResult, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (pw *pickerWrapper) close() { + old := pw.pickerGen.Swap(nil) + close(old.blockingCh) +} + +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { + old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})}) + close(old.blockingCh) +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go new file mode 100644 index 00000000..e87a17f3 --- /dev/null +++ b/vendor/google.golang.org/grpc/preloader.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/status" +) + +// PreparedMsg is responsible for creating a Marshalled and Compressed object. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PreparedMsg struct { + // Struct for preparing msg before sending them + encodedData mem.BufferSlice + hdr []byte + payload mem.BufferSlice + pf payloadFormat +} + +// Encode marshalls and compresses the message using the codec and compressor for the stream. 
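Encode, defined next, is typically paired with SendMsg on the same stream; a hedged sketch of that usage (sendPrepared is a hypothetical helper, and the stream comes from a generated client in real code):

    package main

    import "google.golang.org/grpc"

    // sendPrepared encodes msg once and sends the ready-made frame, letting a
    // hot path or retry loop reuse the marshalled bytes.
    func sendPrepared(stream grpc.ClientStream, msg any) error {
        var pm grpc.PreparedMsg
        if err := pm.Encode(stream, msg); err != nil {
            return err
        }
        return stream.SendMsg(&pm)
    }

    func main() {} // placeholder; obtain a stream from a generated stub
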
+func (p *PreparedMsg) Encode(s Stream, msg any) error {
+	ctx := s.Context()
+	rpcInfo, ok := rpcInfoFromContext(ctx)
+	if !ok {
+		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+	}
+
+	// check if the context has the relevant information to prepareMsg
+	if rpcInfo.preloaderInfo == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+	}
+	if rpcInfo.preloaderInfo.codec == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+	}
+
+	// prepare the msg
+	data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+	if err != nil {
+		return err
+	}
+
+	materializedData := data.Materialize()
+	data.Free()
+	p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
+
+	// TODO: it should be possible to grab the bufferPool from the underlying
+	// stream implementation with a type cast to its actual type (such as
+	// addrConnStream) and accessing the buffer pool directly.
+	var compData mem.BufferSlice
+	compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
+	if err != nil {
+		return err
+	}
+
+	if p.pf.isCompressed() {
+		materializedCompData := compData.Materialize()
+		compData.Free()
+		compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
+	}
+
+	p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
new file mode 100644
index 00000000..ef3d6ed6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -0,0 +1,60 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package dns implements a dns resolver to be installed as the default resolver
+// in grpc.
+package dns
+
+import (
+	"time"
+
+	"google.golang.org/grpc/internal/resolver/dns"
+	"google.golang.org/grpc/resolver"
+)
+
+// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
+//
+// This function affects the global timeout used by all channels using the DNS
+// name resolver scheme.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+//
+// The default value is 30 seconds. Setting the timeout too low may result in
+// premature timeouts during resolution, while setting it too high may lead to
+// unnecessary delays in service discovery. Choose a value appropriate for your
+// specific needs and network environment.
+func SetResolvingTimeout(timeout time.Duration) {
+	dns.ResolvingTimeout = timeout
+}
+
+// NewBuilder creates a dnsBuilder which is used to create DNS resolvers.
+//
+// Deprecated: import grpc and use resolver.Get("dns") instead.
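+//
+// A minimal sketch of the replacement; importing the grpc package registers
+// the "dns" scheme as a side effect:
+//
+//	import (
+//		_ "google.golang.org/grpc" // registers the dns resolver builder
+//		"google.golang.org/grpc/resolver"
+//	)
+//
+//	b := resolver.Get("dns") // nil if the scheme is not registered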
+func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} + +// SetMinResolutionInterval sets the default minimum interval at which DNS +// re-resolutions are allowed. This helps to prevent excessive re-resolution. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +func SetMinResolutionInterval(d time.Duration) { + dns.MinResolutionInterval = d +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 00000000..ada5b9bb --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,251 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value any +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. + // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. 
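+//
+// A short sketch of AddressMap semantics: addresses with the same Addr and
+// ServerName but different Attributes are distinct keys.
+//
+//	m := resolver.NewAddressMap()
+//	addr := resolver.Address{Addr: "10.0.0.1:443"}
+//	m.Set(addr, "conn-1")
+//	if v, ok := m.Get(addr); ok {
+//		fmt.Println(v) // conn-1
+//	}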
+func (a *AddressMap) Get(addr Address) (value any, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value any) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. 
+func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. +func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 00000000..20285451 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,332 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. 
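+//
+// Typical usage, with exampleBuilder standing in for a real Builder
+// implementation:
+//
+//	func init() {
+//		resolver.Register(&exampleBuilder{})
+//	}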
+func Register(b Builder) {
+	m[b.Scheme()] = b
+}
+
+// Get returns the resolver builder registered with the given scheme.
+//
+// If no builder is registered with the scheme, nil will be returned.
+func Get(scheme string) Builder {
+	if b, ok := m[scheme]; ok {
+		return b
+	}
+	return nil
+}
+
+// SetDefaultScheme sets the default scheme that will be used. The default
+// scheme is initially set to "passthrough".
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. The scheme set last overrides
+// previously set values.
+func SetDefaultScheme(scheme string) {
+	defaultScheme = scheme
+	internal.UserSetDefaultScheme = true
+}
+
+// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If
+// SetDefaultScheme is never called, grpc.NewClient uses the "dns" scheme as
+// its default instead.
+func GetDefaultScheme() string {
+	return defaultScheme
+}
+
+// Address represents a server the client connects to.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type Address struct {
+	// Addr is the server address on which a connection will be established.
+	Addr string
+
+	// ServerName is the name of this address.
+	// If non-empty, the ServerName is used as the transport certification authority for
+	// the address, instead of the hostname from the Dial target string. In most cases,
+	// this should not be set.
+	//
+	// WARNING: ServerName must only be populated with trusted values. It
+	// is insecure to populate it with data from untrusted inputs since untrusted
+	// values could be used to bypass the authority checks performed by TLS.
+	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the SubConn.
+	Attributes *attributes.Attributes
+
+	// BalancerAttributes contains arbitrary data about this address intended
+	// for consumption by the LB policy. These attributes do not affect SubConn
+	// creation, connection establishment, handshaking, etc.
+	//
+	// Deprecated: when an Address is inside an Endpoint, this field should not
+	// be used, and it will eventually be removed entirely.
+	BalancerAttributes *attributes.Attributes
+
+	// Metadata is the information associated with Addr, which may be used
+	// to make load balancing decisions.
+	//
+	// Deprecated: use Attributes instead.
+	Metadata any
+}
+
+// Equal returns whether a and o are identical. Metadata is compared directly,
+// not with any recursive introspection.
+//
+// This method compares all fields of the address. When used to tell apart
+// addresses during subchannel creation or connection establishment, it might be
+// more appropriate for the caller to implement custom equality logic.
+func (a Address) Equal(o Address) bool {
+	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+		a.Attributes.Equal(o.Attributes) &&
+		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+		a.Metadata == o.Metadata
+}
+
+// String returns a JSON-formatted string representation of the address.
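+//
+// For an address with no attributes set, the output looks like:
+//
+//	{Addr: "10.0.0.1:443", ServerName: "backend.example.com", }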
+func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the effective authority of the clientconn for which the + // resolver is built. + Authority string +} + +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. + Addresses []Address + + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. 
+	ServiceConfig *serviceconfig.ParseResult
+
+	// Attributes contains arbitrary data about the resolver intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
+}
+
+// ClientConn contains the callbacks used by the resolver to notify the gRPC
+// ClientConn of any updates.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like testing,
+// the new implementation should embed this interface. This allows gRPC to
+// add new methods to this interface.
+type ClientConn interface {
+	// UpdateState updates the state of the ClientConn appropriately.
+	//
+	// If an error is returned, the resolver should try to resolve the
+	// target again. The resolver should use a backoff timer to prevent
+	// overloading the server with requests. If a resolver is certain that
+	// reresolving will not change the result, e.g. because it is
+	// a watch-based resolver, returned errors can be ignored.
+	//
+	// If the resolved State is the same as the last reported one, calling
+	// UpdateState can be omitted.
+	UpdateState(State) error
+	// ReportError notifies the ClientConn that the Resolver encountered an
+	// error. The ClientConn will notify the load balancer and begin calling
+	// ResolveNow on the Resolver with exponential backoff.
+	ReportError(error)
+	// NewAddress is called by the resolver to notify the ClientConn of a new
+	// list of resolved addresses.
+	// The address list should be the complete list of resolved addresses.
+	//
+	// Deprecated: Use UpdateState instead.
+	NewAddress(addresses []Address)
+	// ParseServiceConfig parses the provided service config and returns an
+	// object that provides the parsed config.
+	ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
+}
+
+// Target represents a target for gRPC, as specified in:
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user; gRPC then passes it to the resolver and the balancer.
+//
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
+type Target struct {
+	// URL contains the parsed dial target with an optional default scheme added
+	// to it if the original dial target contained no scheme or contained an
+	// unregistered scheme. Any query params specified in the original dial
+	// target can be accessed from here.
+	URL url.URL
+}
+
+// Endpoint retrieves the endpoint without the leading "/" from either
+// `URL.Path` or `URL.Opaque`. The latter is used when the former is empty.
+func (t Target) Endpoint() string {
+	endpoint := t.URL.Path
+	if endpoint == "" {
+		endpoint = t.URL.Opaque
+	}
+	// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+	// value returned from url.Parse() contains a leading "/". Although this is
+	// in accordance with RFC 3986, we do not want to break existing resolver
+	// implementations which expect the endpoint without the leading "/". So, we
+	// end up stripping the leading "/" here. But this will result in an
+	// incorrect parsing for something like "unix:///path/to/socket".
Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + +// String returns the canonical string representation of Target. +func (t Target) String() string { + return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint() +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically in line, and must keep it unchanged. + OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 00000000..23bb3fb2 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,201 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. 
+ cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. +func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + Authority: ccr.cc.authority, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.TrySchedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. 
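+//
+// A hedged sketch from inside a resolver's watch loop, where watchTarget is
+// an illustrative helper:
+//
+//	if err := watchTarget(); err != nil {
+//		cc.ReportError(err) // gRPC backs off and calls ResolveNow again
+//	}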
+func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 00000000..2d96f140 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,1069 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpc
+
+import (
+	"compress/gzip"
+	"context"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"strings"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/encoding/proto"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// Compressor defines the interface gRPC uses to compress a message.
+//
+// Deprecated: use package encoding.
+type Compressor interface {
+	// Do compresses p into w.
+	Do(w io.Writer, p []byte) error
+	// Type returns the compression algorithm the Compressor uses.
+	Type() string
+}
+
+type gzipCompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressor() Compressor {
+	c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
+	return c
+}
+
+// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
+// of assuming DefaultCompression.
+//
+// The error returned will be nil if the level is valid.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
+	if level < gzip.DefaultCompression || level > gzip.BestCompression {
+		return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
+	}
+	return &gzipCompressor{
+		pool: sync.Pool{
+			New: func() any {
+				w, err := gzip.NewWriterLevel(io.Discard, level)
+				if err != nil {
+					panic(err)
+				}
+				return w
+			},
+		},
+	}, nil
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+	z := c.pool.Get().(*gzip.Writer)
+	defer c.pool.Put(z)
+	z.Reset(w)
+	if _, err := z.Write(p); err != nil {
+		return err
+	}
+	return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+	return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+//
+// Deprecated: use package encoding.
+type Decompressor interface {
+	// Do reads the data from r and decompresses it.
+	Do(r io.Reader) ([]byte, error)
+	// Type returns the compression algorithm the Decompressor uses.
+	Type() string
+}
+
+type gzipDecompressor struct {
+	pool sync.Pool
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+//
+// Deprecated: use package encoding/gzip.
+func NewGZIPDecompressor() Decompressor {
+	return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+	var z *gzip.Reader
+	switch maybeZ := d.pool.Get().(type) {
+	case nil:
+		newZ, err := gzip.NewReader(r)
+		if err != nil {
+			return nil, err
+		}
+		z = newZ
+	case *gzip.Reader:
+		z = maybeZ
+		if err := z.Reset(r); err != nil {
+			d.pool.Put(z)
+			return nil, err
+		}
+	}
+
+	defer func() {
+		z.Close()
+		d.pool.Put(z)
+	}()
+	return io.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+	return "gzip"
+}
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+	compressorType string
+	failFast bool
+	maxReceiveMessageSize *int
+	maxSendMessageSize *int
+	creds credentials.PerRPCCredentials
+	contentSubtype string
+	codec baseCodec
+	maxRetryRPCBufferSize int
+	onFinish []func(err error)
+}
+
+func defaultCallInfo() *callInfo {
+	return &callInfo{
+		failFast: true,
+		maxRetryRPCBufferSize: 256 * 1024, // 256KB
+	}
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+	// before is called before the call is sent to any server. If before
+	// returns a non-nil error, the RPC fails with that error.
+	before(*callInfo) error
+
+	// after is called after the call has completed. after cannot return an
+	// error, so any failures should be reported via output parameters.
+	after(*callInfo, *csAttempt)
+}
+
+// EmptyCallOption does not alter the Call configuration.
+// It can be embedded in another structure to carry satellite data for use
+// by interceptors.
+type EmptyCallOption struct{}
+
+func (EmptyCallOption) before(*callInfo) error { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
+
+// StaticMethod returns a CallOption which specifies that a call is being made
+// to a method that is static, which means the method is known at compile time
+// and doesn't change at runtime. This can be used as a signal to stats plugins
+// that this method is safe to include as a key to a measurement.
+func StaticMethod() CallOption {
+	return StaticMethodCallOption{}
+}
+
+// StaticMethodCallOption is a CallOption that specifies that a call comes
+// from a static method.
+type StaticMethodCallOption struct {
+	EmptyCallOption
+}
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+	return HeaderCallOption{HeaderAddr: md}
+}
+
+// HeaderCallOption is a CallOption for collecting response header metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type HeaderCallOption struct {
+	HeaderAddr *metadata.MD
+}
+
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
+	*o.HeaderAddr, _ = attempt.s.Header()
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+	return TrailerCallOption{TrailerAddr: md}
+}
+
+// TrailerCallOption is a CallOption for collecting response trailer metadata.
+// The metadata field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TrailerCallOption struct {
+	TrailerAddr *metadata.MD
+}
+
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
+	*o.TrailerAddr = attempt.s.Trailer()
+}
+
+// Peer returns a CallOption that retrieves peer information for a unary RPC.
+// The peer field will be populated *after* the RPC completes.
+func Peer(p *peer.Peer) CallOption {
+	return PeerCallOption{PeerAddr: p}
+}
+
+// PeerCallOption is a CallOption for collecting the identity of the remote
+// peer. The peer field will be populated *after* the RPC completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PeerCallOption struct {
+	PeerAddr *peer.Peer
+}
+
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
+	if x, ok := peer.FromContext(attempt.s.Context()); ok {
+		*o.PeerAddr = *x
+	}
+}
+
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
+// waitForReady is false, the RPC will fail immediately. Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
+//
+// By default, RPCs do not "wait for ready".
+func WaitForReady(waitForReady bool) CallOption {
+	return FailFastCallOption{FailFast: !waitForReady}
+}
+
+// FailFast is the opposite of WaitForReady.
+//
+// Deprecated: use WaitForReady.
+func FailFast(failFast bool) CallOption {
+	return FailFastCallOption{FailFast: failFast}
+}
+
+// FailFastCallOption is a CallOption for indicating whether an RPC should fail
+// fast or not.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type FailFastCallOption struct {
+	FailFast bool
+}
+
+func (o FailFastCallOption) before(c *callInfo) error {
+	c.failFast = o.FailFast
+	return nil
+}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
+
+// OnFinish returns a CallOption that configures a callback to be called when
+// the call completes. The error passed to the callback is the status of the
+// RPC, and may be nil. The onFinish callback provided will only be called once
+// by gRPC. This is mainly intended for use by streaming interceptors, which
+// can be notified when the RPC completes along with information about its
+// status.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func OnFinish(onFinish func(err error)) CallOption {
+	return OnFinishCallOption{
+		OnFinish: onFinish,
+	}
+}
+
+// OnFinishCallOption is a CallOption that indicates a callback to be called
+// when the call completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type OnFinishCallOption struct {
+	OnFinish func(error)
+}
+
+func (o OnFinishCallOption) before(c *callInfo) error {
+	c.onFinish = append(c.onFinish, o.OnFinish)
+	return nil
+}
+
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
+
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can receive. If this is not set, gRPC uses the default
+// 4MB.
+func MaxCallRecvMsgSize(bytes int) CallOption {
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
+}
+
+// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
+// size in bytes the client can receive.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type MaxRecvMsgSizeCallOption struct {
+	MaxRecvMsgSize int
+}
+
+func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
+	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
+	return nil
+}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
+
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can send. If this is not set, gRPC uses the default
+// `math.MaxInt32`.
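+//
+// A per-call sketch combining several CallOptions (the method name is
+// illustrative):
+//
+//	var hdr metadata.MD
+//	resp, err := client.SomeMethod(ctx, req,
+//		grpc.MaxCallSendMsgSize(16*1024*1024),
+//		grpc.MaxCallRecvMsgSize(16*1024*1024),
+//		grpc.Header(&hdr),
+//	)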
+func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
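+//
+// Assuming a codec registered under the name "json" via
+// encoding.RegisterCodec, a call using it looks like:
+//
+//	resp, err := client.SomeMethod(ctx, req, grpc.CallContentSubtype("json"))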
+type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = newCodecV1Bridge(o.Codec) + return nil +} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
+type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = newCodecV0Bridge(o.Codec) + return nil +} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r streamReader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte + + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// +// No other error values or types must be returned, which also means +// that the underlying streamReader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { + return 0, nil, err + } + + pf := payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + + data, err := p.r.Read(int(length)) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, data, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. 
If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(b.Len()) > math.MaxUint32 { + b.Free() + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil + } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) + wrapErr := func(err error) error { + out.Free() + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + if compressor != nil { + z, err := compressor.Compress(w) + if err != nil { + return nil, 0, wrapErr(err) + } + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } + } + if err := z.Close(); err != nil { + return nil, 0, wrapErr(err) + } + } else { + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } + } + return out, compressionMade, nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. 
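+//
+// The header follows the gRPC HTTP/2 message framing: one flag byte (0 for
+// uncompressed, 1 for compressed) followed by the payload length as a 4-byte
+// big-endian integer:
+//
+//	+------+-----------------------------+
+//	| flag | length (uint32, big-endian) |
+//	+------+-----------------------------+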
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { + hdr = make([]byte, headerLen) + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData + } else { + length = uint32(data.Len()) + payload = data + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload +} + +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + compressedLength int // The compressed length got from wire. + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } +} + +// recvAndDecompress reads a message from the stream, decompressing it if necessary. +// +// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as +// the buffer is no longer needed. +// TODO: Refactor this function to reduce the number of arguments. +// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() + } + + var size int + if pf.isCompressed() { + defer compressed.Free() + + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } + size = len(uncompressedBuf) + } else { + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if size > maxReceiveMessageSize { + out.Free() + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. 
%d)", size, maxReceiveMessageSize) + } + } else { + out = compressed + } + + if payInfo != nil { + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out + } + + return out, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) + if err != nil { + return nil, 0, err + } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err + } + return out, out.Len(), nil +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) + if err != nil { + return err + } + + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. 
+ defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + } + + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. +func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...any) error { + return status.Errorf(c, format, a...) +} + +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: + return errContextDeadline + case context.Canceled: + return errContextCanceled + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } + + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err + } + + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.CodecV2); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. 
+ c.codec = getCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = getCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 9. +// +// Older versions are kept for compatibility. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true + SupportPackageIsVersion8 = true + SupportPackageIsVersion9 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 00000000..d1e1415a --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,2208 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. 
+ listenerAddressForServeHTTP = "listenerAddressForServeHTTP" +) + +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption + internal.BufferPool = bufferPool +} + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType any + Methods []MethodDesc + Streams []StreamDesc + Metadata any +} + +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl any + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata any +} + +// Server is a gRPC server to serve RPC requests. +type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. 
+ conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events traceEventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines + + channelz *channelz.Server + + serverWorkerChannel chan func() + serverWorkerChannelClose func() +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger + inTapHandle tap.ServerInHandle + statsHandlers []stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + sharedWriteBuffer bool + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 + bufferPool mem.BufferPool + waitForHandlers bool +} + +var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, + bufferPool: mem.DefaultBufferPool(), +} +var globalServerOptions []ServerOption + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
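For illustration only (not part of this patch): the ServerOption machinery above is Go's functional-options pattern, where each exported constructor returns an opaque value that mutates the private serverOptions struct when applied over the defaults. A minimal sketch of the pattern under hypothetical names (options, WithTimeout, and newConfig are illustrative, not grpc-go API):

package main

import (
	"fmt"
	"time"
)

// options plays the role of the private serverOptions struct.
type options struct {
	timeout time.Duration
}

// Option mirrors ServerOption: an opaque setter over *options.
type Option interface{ apply(*options) }

type funcOption func(*options)

func (f funcOption) apply(o *options) { f(o) }

// WithTimeout is analogous to ConnectionTimeout in the vendored code.
func WithTimeout(d time.Duration) Option {
	return funcOption(func(o *options) { o.timeout = d })
}

// newConfig applies the options over defaults, as NewServer does with
// defaultServerOptions.
func newConfig(opts ...Option) options {
	o := options{timeout: 120 * time.Second}
	for _, opt := range opts {
		opt.apply(&o)
	}
	return o
}

func main() {
	fmt.Println(newConfig(WithTimeout(5 * time.Second)).timeout) // 5s
}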
+func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The default value for this buffer is 32KB. Zero or negative +// values will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = internal.KeepaliveMinServerPingTime + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = newCodecV0Bridge(codec) + }) +} + +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. 
+// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. 
Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. 
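For illustration only (not part of this patch): as documented above, the first interceptor passed to ChainUnaryInterceptor becomes the outermost wrapper. A minimal usage sketch against the public API; the timing and tagging interceptors are hypothetical:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

type ctxKey struct{}

// timing is outermost: it observes the latency of everything inside it.
func timing(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v", info.FullMethod, time.Since(start))
	return resp, err
}

// tagging is innermost: it runs immediately around the real handler.
func tagging(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
	return handler(context.WithValue(ctx, ctxKey{}, info.FullMethod), req)
}

func main() {
	// Handlers effectively execute as timing(tagging(handler)).
	s := grpc.NewServer(grpc.ChainUnaryInterceptor(timing, tagging))
	defer s.Stop()
}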
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSizeServerOption is a ServerOption that sets the max +// (uncompressed) size of header list that the server is prepared to accept. +type MaxHeaderListSizeServerOption struct { + MaxHeaderListSize uint32 +} + +func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) { + so.maxHeaderListSize = &o.MaxHeaderListSize +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return MaxHeaderListSizeServerOption{ + MaxHeaderListSize: s, + } +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// WaitForHandlers cause Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + +func bufferPool(bufferPool mem.BufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.bufferPool = bufferPool + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. 
Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorker blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows multiple requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel + if !ok { + return + } + f() + } + go s.serverWorker() +} + +// initServerWorkers creates worker goroutines and a channel to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannel = make(chan func()) + s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + close(s.serverWorkerChannel) + }) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + go s.serverWorker() + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + channelz: channelz.RegisterServer(""), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + channelz.Info(logger, s.channelz, "Server created") + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...any) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...any) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl any) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss any) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata any +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.streams { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +type listenSocket struct { + net.Listener + channelz *channelz.Socket +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + channelz.RemoveEntry(l.channelz.ID) + channelz.Info(logger, l.channelz, "ListenSocket deleted") + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. +// Serve will return a non-nil error unless Stop or GracefulStop is called. 
+// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{ + Listener: lis, + channelz: channelz.RegisterSocket(&channelz.Socket{ + SocketType: channelz.SocketTypeListen, + Parent: s.channelz, + RefName: lis.Addr().String(), + LocalAddr: lis.Addr(), + SocketOptions: channelz.GetSocketOption(lis)}, + ), + } + s.lis[ls] = true + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + s.mu.Unlock() + channelz.Info(logger, ls.channelz, "ListenSocket created") + + var tempDelay time.Duration // how long to sleep on accept failure + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(lis.Addr().String(), rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. 
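For illustration only (not part of this patch): the Serve documentation above describes how to restore OS-default TCP keepalives, since Go's standard library otherwise forces a 15s keepalive on accepted connections. A minimal sketch of the first step it lists; the second step, overriding Accept to set SO_KEEPALIVE, is omitted here:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// A negative KeepAlive stops the Go runtime from overriding the OS
	// defaults; it also means Go will not enable TCP keepalives itself.
	lc := net.ListenConfig{KeepAlive: -1}
	lis, err := lc.Listen(context.Background(), "tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}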
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) + if st == nil { + return + } + + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + + if !s.addConn(lisAddr, st) { + return + } + go func() { + s.serveStreams(context.Background(), st, rawConn) + s.removeConn(lisAddr, st) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). +func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, + InTapHandle: s.opts.inTapHandle, + StatsHandlers: s.opts.statsHandlers, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, + ChannelzParent: s.channelz, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, + } + st, err := transport.NewServerTransport(c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } + return nil + } + + return st +} + +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } + + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer s.handlersWG.Done() + s.handleStream(st, stream) + } + + if s.opts.numServerWorkers > 0 { + select { + case s.serverWorkerChannel <- f: + return + default: + // If all stream workers are busy, fallback to the default code path. + } + } + go f() + }) +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. 
When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) + if err != nil { + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { + return + } + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(r.Context(), st, nil) +} + +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close(errors.New("Server.addConn called when server has already been stopped")) + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain("") + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true + return true +} + +func (s *Server) removeConn(addr string, st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } + s.cv.Broadcast() + } +} + +func (s *Server) incrCallsStarted() { + s.channelz.ServerMetrics.CallsStarted.Add(1) + s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + s.channelz.ServerMetrics.CallsSucceeded.Add(1) +} + +func (s *Server) incrCallsFailed() { + s.channelz.ServerMetrics.CallsFailed.Add(1) +} + +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) + return err + } + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) + if err != nil { + data.Free() + channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) + return err + } + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. 
+ }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil { + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } + } + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainUnaryInterceptors(interceptors) + } + + s.opts.unaryInt = chainedInt +} + +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + for _, sh := range shs { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, + } + sh.HandleRPC(ctx, statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + for _, sh := range shs { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(ctx, end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + var sendCompressorName string + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + sendCompressorName = cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ comp = encoding.GetCompressor(rc) + if comp != nil { + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + var payInfo *payloadInfo + if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + defer payInfo.free() + } + + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return err + } + defer d.Free() + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v any) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + + for _, sh := range shs { + sh.HandleRPC(ctx, &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: d.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: d.Materialize(), + } + for _, binlog := range binlogs { + binlog.Log(ctx, cm) + } + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx = NewContextWithServerTransportStream(ctx, stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if len(binlogs) != 0 { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + sh := &binarylog.ServerHeader{ + Header: h, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + } + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, st) + } + } + return err + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + sm := &binarylog.ServerMessage{ + Message: reply, + } + for _, binlog := range binlogs { + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) + } + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(ctx, st) + } + } + return t.WriteStatus(stream, statusOK) +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainStreamInterceptors(interceptors) + } + + s.opts.streamInt = chainedInt +} + +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + shs := s.opts.statsHandlers + var statsBegin *stats.Begin + if len(shs) != 0 { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(ctx, statsBegin) + } + } + ctx = NewContextWithServerTransportStream(ctx, stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: shs, + } + + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + // See comment in 
processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if len(shs) != 0 { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + for _, sh := range shs { + sh.HandleRPC(ctx, end) + } + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range ss.binlogs { + binlog.Log(ctx, logEntry) + } + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + ss.sendCompressorName = s.opts.cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server any + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
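+			// (status.FromContextError maps context.DeadlineExceeded to
+			// codes.DeadlineExceeded and context.Canceled to codes.Canceled;
+			// any other error becomes codes.Unknown.)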
+			appStatus = status.FromContextError(appErr)
+			appErr = appStatus.Err()
+		}
+		if trInfo != nil {
+			ss.mu.Lock()
+			ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
+			ss.trInfo.tr.SetError()
+			ss.mu.Unlock()
+		}
+		if len(ss.binlogs) != 0 {
+			st := &binarylog.ServerTrailer{
+				Trailer: ss.s.Trailer(),
+				Err:     appErr,
+			}
+			for _, binlog := range ss.binlogs {
+				binlog.Log(ctx, st)
+			}
+		}
+		t.WriteStatus(ss.s, appStatus)
+		// TODO: Should we log an error from WriteStatus here and below?
+		return appErr
+	}
+	if trInfo != nil {
+		ss.mu.Lock()
+		ss.trInfo.tr.LazyLog(stringer("OK"), false)
+		ss.mu.Unlock()
+	}
+	if len(ss.binlogs) != 0 {
+		st := &binarylog.ServerTrailer{
+			Trailer: ss.s.Trailer(),
+			Err:     appErr,
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ctx, st)
+		}
+	}
+	return t.WriteStatus(ss.s, statusOK)
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+	ctx := stream.Context()
+	ctx = contextWithServer(ctx, s)
+	var ti *traceInfo
+	if EnableTracing {
+		tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+		ctx = newTraceContext(ctx, tr)
+		ti = &traceInfo{
+			tr: tr,
+			firstLine: firstLine{
+				client:     false,
+				remoteAddr: t.Peer().Addr,
+			},
+		}
+		if dl, ok := ctx.Deadline(); ok {
+			ti.firstLine.deadline = time.Until(dl)
+		}
+	}
+
+	sm := stream.Method()
+	if sm != "" && sm[0] == '/' {
+		sm = sm[1:]
+	}
+	pos := strings.LastIndex(sm, "/")
+	if pos == -1 {
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+			ti.tr.SetError()
+		}
+		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
+		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+			if ti != nil {
+				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+				ti.tr.SetError()
+			}
+			channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
+		}
+		if ti != nil {
+			ti.tr.Finish()
+		}
+		return
+	}
+	service := sm[:pos]
+	method := sm[pos+1:]
+
+	md, _ := metadata.FromIncomingContext(ctx)
+	for _, sh := range s.opts.statsHandlers {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+		sh.HandleRPC(ctx, &stats.InHeader{
+			FullMethod:  stream.Method(),
+			RemoteAddr:  t.Peer().Addr,
+			LocalAddr:   t.Peer().LocalAddr,
+			Compression: stream.RecvCompress(),
+			WireLength:  stream.HeaderWireLength(),
+			Header:      md,
+		})
+	}
+	// Propagate the tagged context to the stream so that callouts made from
+	// the stream observe it. Will be deleted once all stats handler calls
+	// come from the gRPC layer.
+	stream.SetContext(ctx)
+
+	srv, knownService := s.services[service]
+	if knownService {
+		if md, ok := srv.methods[method]; ok {
+			s.processUnaryRPC(ctx, t, stream, srv, md, ti)
+			return
+		}
+		if sd, ok := srv.streams[method]; ok {
+			s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
+			return
+		}
+	}
+	// Unknown service, or known service but unknown method.
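+	// If a catch-all handler was installed via the UnknownServiceHandler
+	// ServerOption, route the call to it as a streaming RPC instead of
+	// failing with codes.Unimplemented.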
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() + } + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) + } + if ti != nil { + ti.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.stop(false) +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) }) + s.mu.Lock() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. 
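+	// (Serve goroutines return once closeListenersLocked above has closed
+	// their listeners; serveWG.Wait below blocks until all of them exit.)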
+ s.mu.Unlock() + s.serveWG.Wait() + + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() + } + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + + if s.opts.numServerWorkers > 0 { + // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // connections have been closed above ensures that there are no + // goroutines executing the callback passed to st.HandleStreams (where + // the channel is written to). + s.serverWorkerChannelClose() + } + + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() + } + + if s.events != nil { + s.events.Finish() + s.events = nil + } +} + +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } + } +} + +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain("graceful_stop") + } + } + s.drain = true + } +} + +// s.mu must be held by the caller. +func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() + } + s.lis = nil +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return getCodec(proto.Name) + } + codec := getCodec(contentSubtype) + if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) + return getCodec(proto.Name) + } + return codec +} + +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. 
For unary handlers, this occurs when
+// the handler returns; for streaming handlers, this can happen when stream's
+// SendMsg method is called.
+// - An RPC status is sent out (error or success). This occurs when the handler
+// returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SetHeader(ctx context.Context, md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	return stream.SetHeader(md)
+}
+
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list). The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+	stream := ServerTransportStreamFromContext(ctx)
+	if stream == nil {
+		return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
+	}
+	if err := stream.SendHeader(md); err != nil {
+		return toRPCErr(err)
+	}
+	return nil
+}
+
+// SetSendCompressor sets a compressor for outbound messages from the server.
+// It must not be called after any event that causes headers to be sent
+// (see ServerStream.SetHeader for the complete list). The provided compressor
+// is used when the following conditions are met:
+//
+// - the compressor is registered via encoding.RegisterCompressor
+// - the compressor name exists in the client-advertised compressor names
+// sent in the grpc-accept-encoding header. Use ClientSupportedCompressors to
+// get the client-supported compressor names.
+//
+// The context provided must be the context passed to the server's handler.
+// Note that the compressor name encoding.Identity disables outbound
+// compression.
+// By default, server messages will be sent using the same compressor with
+// which request messages were sent.
+//
+// It is not safe to call SetSendCompressor concurrently with SendHeader and
+// SendMsg.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SetSendCompressor(ctx context.Context, name string) error {
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+	if !ok || stream == nil {
+		return fmt.Errorf("failed to fetch the stream from the given context")
+	}
+
+	if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
+		return fmt.Errorf("unable to set send compressor: %w", err)
+	}
+
+	return stream.SetSendCompress(name)
+}
+
+// ClientSupportedCompressors returns compressor names advertised by the client
+// via the grpc-accept-encoding header.
+//
+// The context provided must be the context passed to the server's handler.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
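+//
+// As an illustrative sketch (not part of the API contract), a handler could
+// opt in to gzip compression only when the client advertises it; this assumes
+// the gzip compressor is registered via a blank import of
+// google.golang.org/grpc/encoding/gzip:
+//
+//	if names, err := grpc.ClientSupportedCompressors(ctx); err == nil {
+//		for _, name := range names {
+//			if name == "gzip" {
+//				_ = grpc.SetSendCompressor(ctx, "gzip")
+//				break
+//			}
+//		}
+//	}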
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return stream.ClientAdvertisedCompressors(), nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name string, clientCompressors []string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range clientCompressors { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 00000000..2671c5ef --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,356 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type MethodConfig = internalserviceconfig.MethodConfig
+
+// ServiceConfig is provided by the service provider and contains parameters for how
+// clients that connect to the service should behave.
+//
+// Deprecated: Users should not use this struct. Service config should be received
+// through the name resolver, as specified here
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+type ServiceConfig struct {
+	serviceconfig.Config
+
+	// lbConfig is the service config's load balancing configuration. If
+	// lbConfig and LB are both present, lbConfig will be used.
+	lbConfig serviceconfig.LoadBalancingConfig
+
+	// Methods contains a map for the methods in this service. If there is an
+	// exact match for a method (i.e. /service/method) in the map, use the
+	// corresponding MethodConfig. If there's no exact match, look for the
+	// default config for the service (/service/) and use the corresponding
+	// MethodConfig if it exists. Otherwise, the method has no MethodConfig to
+	// use.
+	Methods map[string]MethodConfig
+
+	// If a retryThrottlingPolicy is provided, gRPC will automatically throttle
+	// retry attempts and hedged RPCs when the client’s ratio of failures to
+	// successes exceeds a threshold.
+	//
+	// For each server name, the gRPC client will maintain a token_count which is
+	// initially set to maxTokens, and can take values between 0 and maxTokens.
+	//
+	// Every outgoing RPC (regardless of service or method invoked) will change
+	// token_count as follows:
+	//
+	// - Every failed RPC will decrement the token_count by 1.
+	// - Every successful RPC will increment the token_count by tokenRatio.
+	//
+	// If token_count is less than or equal to maxTokens / 2, then RPCs will not
+	// be retried and hedged RPCs will not be sent.
+	retryThrottling *retryThrottlingPolicy
+	// healthCheckConfig must be set as one of the requirements to enable LB channel
+	// health check.
+	healthCheckConfig *healthCheckConfig
+	// rawJSONString stores the service config JSON string that gets parsed into
+	// this service config struct.
+	rawJSONString string
+}
+
+// healthCheckConfig defines the go-native version of the LB channel health check config.
+type healthCheckConfig struct {
+	// serviceName is the service name to use in the health-checking request.
+ ServiceName string +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *internalserviceconfig.Duration + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
+type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *json.RawMessage + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult { + return parseServiceConfig(js, defaultMaxCallAttempts) + } +} +func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + c := rsc.LoadBalancingConfig + if c == nil { + name := pickfirst.Name + if rsc.LoadBalancingPolicy != nil { + name = *rsc.LoadBalancingPolicy + } + if balancer.Get(name) == nil { + name = pickfirst.Name + } + cfg := []map[string]any{{name: struct{}{}}} + strCfg, err := json.Marshal(cfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)} + } + r := json.RawMessage(strCfg) + c = &r + } + cfg, err := gracefulswitch.ParseConfig(*c) + if err != nil { + return &serviceconfig.ParseResult{Err: err} + } + sc.lbConfig = cfg + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + + paths := map[string]struct{}{} + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: (*time.Duration)(m.Timeout), + } + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil { + logger.Warningf("grpc: unmarshalling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { + if jrp == nil { + 
return nil, nil
+	}
+
+	if jrp.MaxAttempts <= 1 ||
+		jrp.InitialBackoff <= 0 ||
+		jrp.MaxBackoff <= 0 ||
+		jrp.BackoffMultiplier <= 0 ||
+		len(jrp.RetryableStatusCodes) == 0 {
+		logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
+		return nil, nil
+	}
+
+	if jrp.MaxAttempts < maxAttempts {
+		maxAttempts = jrp.MaxAttempts
+	}
+	rp := &internalserviceconfig.RetryPolicy{
+		MaxAttempts:          maxAttempts,
+		InitialBackoff:       time.Duration(jrp.InitialBackoff),
+		MaxBackoff:           time.Duration(jrp.MaxBackoff),
+		BackoffMultiplier:    jrp.BackoffMultiplier,
+		RetryableStatusCodes: make(map[codes.Code]bool),
+	}
+	for _, code := range jrp.RetryableStatusCodes {
+		rp.RetryableStatusCodes[code] = true
+	}
+	return rp, nil
+}
+
+func min(a, b *int) *int {
+	if *a < *b {
+		return a
+	}
+	return b
+}
+
+func getMaxSize(mcMax, doptMax *int, defaultVal int) *int {
+	if mcMax == nil && doptMax == nil {
+		return &defaultVal
+	}
+	if mcMax != nil && doptMax != nil {
+		return min(mcMax, doptMax)
+	}
+	if mcMax != nil {
+		return mcMax
+	}
+	return doptMax
+}
+
+func newInt(b int) *int {
+	return &b
+}
+
+func init() {
+	internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two configs. The rawJSONString field is ignored,
+// because they may differ in whitespace.
+//
+// If either of them is not a *ServiceConfig, it returns false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	aa, ok := a.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	bb, ok := b.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	aaRaw := aa.rawJSONString
+	aa.rawJSONString = ""
+	bbRaw := bb.rawJSONString
+	bb.rawJSONString = ""
+	defer func() {
+		aa.rawJSONString = aaRaw
+		bb.rawJSONString = bbRaw
+	}()
+	// Using reflect.DeepEqual instead of cmp.Equal because many balancer
+	// configs are unexported, and cmp.Equal cannot compare unexported fields
+	// from unexported structs.
+	return reflect.DeepEqual(aa, bb)
+}
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
new file mode 100644
index 00000000..dc03731e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"context"
+	"net"
+)
+
+// ConnTagInfo defines the relevant information needed by the connection context tagger.
+type ConnTagInfo struct {
+	// RemoteAddr is the remote address of the corresponding connection.
+	RemoteAddr net.Addr
+	// LocalAddr is the local address of the corresponding connection.
+	LocalAddr net.Addr
+}
+
+// RPCTagInfo defines the relevant information needed by the RPC context tagger.
+type RPCTagInfo struct {
+	// FullMethodName is the RPC method in the format of /package.service/method.
+	FullMethodName string
+	// FailFast indicates if this RPC is failfast.
+	// This field is only valid on the client side; it's always false on the
+	// server side.
+	FailFast bool
+}
+
+// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
+type Handler interface {
+	// TagRPC can attach some information to the given context.
+	// The context used for the rest of the lifetime of the RPC will be
+	// derived from the returned context.
+	TagRPC(context.Context, *RPCTagInfo) context.Context
+	// HandleRPC processes the RPC stats.
+	HandleRPC(context.Context, RPCStats)
+
+	// TagConn can attach some information to the given context.
+	// The returned context will be used for stats handling.
+	// For conn stats handling, the context used in HandleConn for this
+	// connection will be derived from the context returned.
+	// For RPC stats handling,
+	// - On server side, the context used in HandleRPC for all RPCs on this
+	// connection will be derived from the context returned.
+	// - On client side, the context is not derived from the context returned.
+	TagConn(context.Context, *ConnTagInfo) context.Context
+	// HandleConn processes the Conn stats.
+	HandleConn(context.Context, ConnStats)
+}
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
new file mode 100644
index 00000000..71195c49
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -0,0 +1,343 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats is for collecting and reporting various network and RPC stats.
+// This package is for monitoring purposes only. All fields are read-only.
+// All APIs are experimental.
+package stats // import "google.golang.org/grpc/stats"
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"google.golang.org/grpc/metadata"
+)
+
+// RPCStats contains stats information about RPCs.
+type RPCStats interface {
+	isRPCStats()
+	// IsClient returns true if this RPCStats is from client side.
+	IsClient() bool
+}
+
+// Begin contains stats when an RPC attempt begins.
+// FailFast is only valid if this Begin is from client side.
+type Begin struct {
+	// Client is true if this Begin is from client side.
+	Client bool
+	// BeginTime is the time when the RPC attempt begins.
+	BeginTime time.Time
+	// FailFast indicates if this RPC is failfast.
+	FailFast bool
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+	// IsTransparentRetryAttempt indicates whether this attempt was initiated
+	// due to transparently retrying a previous attempt.
+	IsTransparentRetryAttempt bool
+}
+
+// IsClient indicates if the stats information is from client side.
+func (s *Begin) IsClient() bool { return s.Client }
+
+func (s *Begin) isRPCStats() {}
+
+// PickerUpdated indicates that the LB policy provided a new picker while the
+// RPC was waiting for one.
+type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the InPayload returns and must be + // copied if needed later. + Payload any + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). + Length int + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. + WireLength int + + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. This may be modified after + // the call to HandleRPC which provides the OutPayload returns and must be + // copied if needed later. + Payload any + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). + Length int + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. 
+ WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. 
+func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. 
+func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 00000000..bb2b2a21 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1841 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "strconv" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv any, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. +type StreamDesc struct { + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. 
+ SendMsg(m any) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m any) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. 
Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
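+			// (Per gRFC A54, a config selector may not return statuses with
+			// codes reserved for the gRPC library itself; such statuses are
+			// rewritten to codes.Internal below.)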
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + } + + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. 
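+	// Note that withRetry may invoke op more than once: it is replayed with a
+	// fresh csAttempt whenever a retry is permitted.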
+ op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { + return nil, err + } + + if len(cs.binlogs) != 0 { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. + go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// newAttemptLocked creates a new csAttempt without a transport or stream. +func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) { + if err := cs.ctx.Err(); err != nil { + return nil, toRPCErr(err) + } + if err := cs.cc.ctx.Err(); err != nil { + return nil, ErrClientConnClosing + } + + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + method := cs.callHdr.Method + var beginTime time.Time + shs := cs.cc.dopts.copts.StatsHandlers + for _, sh := range shs { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: cs.callInfo.failFast, + IsClientStream: cs.desc.ClientStreams, + IsServerStream: cs.desc.ServerStreams, + IsTransparentRetryAttempt: isTransparent, + } + sh.HandleRPC(ctx, begin) + } + + var trInfo *traceInfo + if EnableTracing { + trInfo = &traceInfo{ + tr: newTrace("grpc.Sent."+methodFamily(method), method), + firstLine: firstLine{ + client: true, + }, + } + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = time.Until(deadline) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = newTraceContext(ctx, trInfo.tr) + } + + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { + // Add extra metadata (metadata that will be added by transport) to context + // so the balancer can see them. 
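+ // Hedged illustration of the pair added below (the value tracks the
+ // negotiated content subtype):
+ //
+ //	grpcutil.ContentType("")     // "application/grpc" (default codec)
+ //	grpcutil.ContentType("json") // "application/grpc+json", e.g. via CallContentSubtype("json")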
+ ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+ "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+ ))
+ }
+
+ return &csAttempt{
+ ctx: ctx,
+ beginTime: beginTime,
+ cs: cs,
+ dc: cs.cc.dopts.dc,
+ statsHandlers: shs,
+ trInfo: trInfo,
+ }, nil
+}
+
+func (a *csAttempt) getTransport() error {
+ cs := a.cs
+
+ var err error
+ a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ if err != nil {
+ if de, ok := err.(dropError); ok {
+ err = de.error
+ a.drop = true
+ }
+ return err
+ }
+ if a.trInfo != nil {
+ a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+ }
+ return nil
+}
+
+func (a *csAttempt) newStream() error {
+ cs := a.cs
+ cs.callHdr.PreviousAttempts = cs.numRetries
+
+ // Merge metadata stored in PickResult, if any, with existing call metadata.
+ // It is safe to overwrite the csAttempt's context here, since all state
+ // maintained in it is local to the attempt. When the attempt has to be
+ // retried, a new instance of csAttempt will be created.
+ if a.pickResult.Metadata != nil {
+ // We currently do not have a function in the metadata package which
+ // merges given metadata with existing metadata in a context. Existing
+ // function `AppendToOutgoingContext()` takes a variadic argument of key
+ // value pairs.
+ //
+ // TODO: Make it possible to retrieve key value pairs from metadata.MD
+ // in a form passable to AppendToOutgoingContext(), or create a version
+ // of AppendToOutgoingContext() that accepts a metadata.MD.
+ md, _ := metadata.FromOutgoingContext(a.ctx)
+ md = metadata.Join(md, a.pickResult.Metadata)
+ a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+ }
+
+ s, err := a.t.NewStream(a.ctx, cs.callHdr)
+ if err != nil {
+ nse, ok := err.(*transport.NewStreamError)
+ if !ok {
+ // Unexpected.
+ return err
+ }
+
+ if nse.AllowTransparentRetry {
+ a.allowTransparentRetry = true
+ }
+
+ // Unwrap and convert error.
+ return toRPCErr(nse.Err)
+ }
+ a.s = s
+ a.ctx = s.Context()
+ a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+ return nil
+}
+
+// clientStream implements a client side Stream.
+type clientStream struct {
+ callHdr *transport.CallHdr
+ opts []CallOption
+ callInfo *callInfo
+ cc *ClientConn
+ desc *StreamDesc
+
+ codec baseCodec
+ cp Compressor
+ comp encoding.Compressor
+
+ cancel context.CancelFunc // cancels all attempts
+
+ sentLast bool // sent an end stream
+
+ methodConfig *MethodConfig
+
+ ctx context.Context // the application's context, wrapped by stats/tracing
+
+ retryThrottler *retryThrottler // The throttler active when the RPC began.
+
+ binlogs []binarylog.MethodLogger
+ // serverHeaderBinlogged is a boolean for whether server header has been
+ // logged. Server header will be logged the first time one of the following
+ // happens: stream.Header(), stream.Recv().
+ //
+ // It's only read and used by Recv() and Header(), so it doesn't need to be
+ // synchronized.
+ serverHeaderBinlogged bool
+
+ mu sync.Mutex
+ firstAttempt bool // if true, transparent retry is valid
+ numRetries int // exclusive of transparent retry attempt(s)
+ numRetriesSincePushback int // retries since pushback; to reset backoff
+ finished bool // TODO: replace with atomic cmpxchg or sync.Once?
+ // attempt is the active client stream attempt.
+ // The only place where it is written is the newAttemptLocked method and this method never writes nil.
+ // So, attempt can be nil only inside newClientStream function when clientStream is first created.
+ // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool +} + +func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } + cs.committed = true + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err + } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } + // Wait for the trailers. + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return true, nil + } + if cs.cc.dopts.disableRetry { + return false, err + } + + pushback := 0 + hasPushback := false + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := a.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. 
+ return false, err + } + } + + var code codes.Code + if a.s != nil { + code = a.s.Status().Code() + } else { + code = status.Code(err) + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return false, err + } + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(rand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return false, nil + case <-cs.ctx.Done(): + t.Stop() + return false, status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { + for { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. + return err + } + // Note that the first op in replayBuffer always sets cs.attempt + // if it is able to pick a transport and create a stream. + if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { + return nil + } + } +} + +func (cs *clientStream) Context() context.Context { + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + if cs.attempt.s != nil { + return cs.attempt.s.Context() + } + return cs.ctx +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + // toRPCErr is used in case the error from the attempt comes from + // NewClientStream, which intentionally doesn't return a status + // error to allow for further inspection; all other errors should + // already be status errors. + return toRPCErr(op(cs.attempt)) + } + if len(cs.replayBuffer) == 0 { + // For the first op, which controls creation of the stream and + // assigns cs.attempt, we need to create a new attempt inline + // before executing the first op. On subsequent ops, the attempt + // is created immediately before replaying the ops. + var err error + if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil { + cs.mu.Unlock() + cs.finish(err) + return err + } + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. 
+ continue
+ }
+ if err == io.EOF {
+ <-a.s.Done()
+ }
+ if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+ onSuccess()
+ cs.mu.Unlock()
+ return err
+ }
+ if err := cs.retryLocked(a, err); err != nil {
+ cs.mu.Unlock()
+ return err
+ }
+ }
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+ var m metadata.MD
+ err := cs.withRetry(func(a *csAttempt) error {
+ var err error
+ m, err = a.s.Header()
+ return toRPCErr(err)
+ }, cs.commitAttemptLocked)
+
+ if m == nil && err == nil {
+ // The stream ended with success. Finish the clientStream.
+ err = io.EOF
+ }
+
+ if err != nil {
+ cs.finish(err)
+ // Do not return the error. The user should get it by calling Recv().
+ return nil, nil
+ }
+
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
+ // Only log if binary log is on and header has not been logged, and
+ // there are actually headers to log.
+ logEntry := &binarylog.ServerHeader{
+ OnClientSide: true,
+ Header: m,
+ PeerAddr: nil,
+ }
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ cs.serverHeaderBinlogged = true
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, logEntry)
+ }
+ }
+
+ return m, nil
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+ // On RPC failure, we never need to retry: valid usage requires that
+ // RecvMsg() return a non-nil error before this function is called, and
+ // we would have retried earlier if necessary.
+ //
+ // Commit the attempt anyway, just in case users are not following those
+ // directions -- it will prevent races and should not meaningfully impact
+ // performance.
+ cs.commitAttempt()
+ if cs.attempt.s == nil {
+ return nil
+ }
+ return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
+ for _, f := range cs.replayBuffer {
+ if err := f.op(attempt); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
+ // Note: we will still buffer if retry is disabled (for transparent retries).
+ if cs.committed {
+ return
+ }
+ cs.replayBufferSize += sz
+ if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.commitAttemptLocked()
+ cleanup()
+ return
+ }
+ cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
+}
+
+func (cs *clientStream) SendMsg(m any) (err error) {
+ defer func() {
+ if err != nil && err != io.EOF {
+ // Call finish on the client stream for errors generated by this SendMsg
+ // call, as these indicate problems created by this client. (Transport
+ // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
+ // error will be returned from RecvMsg eventually in that case, or be
+ // retried.)
+ cs.finish(err)
+ }
+ }()
+ if cs.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+ }
+ if !cs.desc.ClientStreams {
+ cs.sentLast = true
+ }
+
+ // load hdr, payload, data
+ hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
+ // TODO(dfawley): should we be checking len(data) instead?
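+ // The limit enforced below comes from defaults, service config, or a
+ // per-call option; a hedged sketch (method name invented):
+ //
+ //	err := cc.Invoke(ctx, "/echo.Echo/Unary", req, resp,
+ //		grpc.MaxCallSendMsgSize(1<<20))
+ //	// payloads over 1 MiB now fail here with codes.ResourceExhausted.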
+ if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) + } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() + op := func(a *csAttempt) error { + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. + onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() + } + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ + OnClientSide: true, + Message: data.Materialize(), + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } + } + return err +} + +func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if len(cs.binlogs) != 0 { + recvInfo = &payloadInfo{} + defer recvInfo.free() + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes.Materialize(), + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + + cs.mu.Unlock() + // Only one of cancel or trailer needs to be logged. 
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { + cs := a.cs + if len(a.statsHandlers) != 0 && payInfo == nil { + payInfo = &payloadInfo{} + defer payInfo.free() + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: payInfo.uncompressedBytes.Len(), + }) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
+ if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
+ return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
+ }
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+}
+
+func (a *csAttempt) finish(err error) {
+ a.mu.Lock()
+ if a.finished {
+ a.mu.Unlock()
+ return
+ }
+ a.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ var tr metadata.MD
+ if a.s != nil {
+ a.t.CloseStream(a.s, err)
+ tr = a.s.Trailer()
+ }
+
+ if a.pickResult.Done != nil {
+ br := false
+ if a.s != nil {
+ br = a.s.BytesReceived()
+ }
+ a.pickResult.Done(balancer.DoneInfo{
+ Err: err,
+ Trailer: tr,
+ BytesSent: a.s != nil,
+ BytesReceived: br,
+ ServerLoad: balancerload.Parse(tr),
+ })
+ }
+ for _, sh := range a.statsHandlers {
+ end := &stats.End{
+ Client: true,
+ BeginTime: a.beginTime,
+ EndTime: time.Now(),
+ Trailer: tr,
+ Error: err,
+ }
+ sh.HandleRPC(a.ctx, end)
+ }
+ if a.trInfo != nil && a.trInfo.tr != nil {
+ if err == nil {
+ a.trInfo.tr.LazyPrintf("RPC: [OK]")
+ } else {
+ a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ a.trInfo.tr.SetError()
+ }
+ a.trInfo.tr.Finish()
+ a.trInfo.tr = nil
+ }
+ a.mu.Unlock()
+}
+
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
+// given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid race, transport is specified separately, instead
+// of using ac.transport.
+//
+// Main difference between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+ if t == nil {
+ // TODO: return RPC error here?
+ return nil, errors.New("transport provided is nil")
+ }
+ // defaultCallInfo contains unnecessary info (e.g. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+ c := &callInfo{}
+
+ // Possible context leak:
+ // The cancel function for the child context we create will only be called
+ // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+ // an error is generated by SendMsg.
+ // https://github.com/grpc/grpc-go/issues/1818.
+ ctx, cancel := context.WithCancel(ctx)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
+
+ for _, o := range opts {
+ if err := o.before(c); err != nil {
+ return nil, toRPCErr(err)
+ }
+ }
+ c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize)
+ if err := setCallInfoCodec(c); err != nil {
+ return nil, err
+ }
+
+ callHdr := &transport.CallHdr{
+ Host: ac.cc.authority,
+ Method: method,
+ ContentSubtype: c.contentSubtype,
+ }
+
+ // Set our outgoing compression according to the UseCompressor CallOption, if
+ // set. In that case, also find the compressor from the encoding package.
+ // Otherwise, use the compressor configured by the WithCompressor DialOption,
+ // if set.
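+ // Hedged sketch of the two knobs described above, as a caller would set
+ // them (gzip is registered by blank-importing grpc's encoding/gzip package):
+ //
+ //	import _ "google.golang.org/grpc/encoding/gzip"
+ //
+ //	// per call (UseCompressor CallOption):
+ //	cc.Invoke(ctx, method, req, resp, grpc.UseCompressor("gzip"))
+ //	// per connection (legacy WithCompressor DialOption feeding dopts.cp):
+ //	grpc.NewClient(target, grpc.WithCompressor(grpc.NewGZIPCompressor()))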
+ var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. + go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() + select { + case <-acCtx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) 
+ as.finish(err)
+ }
+ }()
+ if as.sentLast {
+ return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
+ }
+ if !as.desc.ClientStreams {
+ as.sentLast = true
+ }
+
+ // load hdr, payload, data
+ hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ data.Free()
+ // only free payload if compression was made, and therefore it is a different set
+ // of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ // TODO(dfawley): should we be checking len(data) instead?
+ if payload.Len() > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
+ }
+
+ if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if !as.desc.ClientStreams {
+ // For non-client-streaming RPCs, we return nil instead of EOF on error
+ // because the generated code requires it. finish is not called; RecvMsg()
+ // will call it with the stream's status independently.
+ return nil
+ }
+ return io.EOF
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgSent()
+ }
+ return nil
+}
+
+func (as *addrConnStream) RecvMsg(m any) (err error) {
+ defer func() {
+ if err != nil || !as.desc.ServerStreams {
+ // err != nil or non-server-streaming indicates end of stream.
+ as.finish(err)
+ }
+ }()
+
+ if !as.decompSet {
+ // Block until we receive headers containing received message encoding.
+ if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.dc == nil || as.dc.Type() != ct {
+ // No configured decompressor, or it does not match the incoming
+ // message encoding; attempt to find a registered compressor that does.
+ as.dc = nil
+ as.decomp = encoding.GetCompressor(ct)
+ }
+ } else {
+ // No compression is used; disable our decompressor.
+ as.dc = nil
+ }
+ // Only initialize this state once per stream.
+ as.decompSet = true
+ }
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
+ if err == io.EOF {
+ if statusErr := as.s.Status().Err(); statusErr != nil {
+ return statusErr
+ }
+ return io.EOF // indicates successful end of stream.
+ }
+ return toRPCErr(err)
+ }
+
+ if channelz.IsOn() {
+ as.t.IncrMsgRecv()
+ }
+ if as.desc.ServerStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+
+ // Special handling for non-server-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
+ return as.s.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
+ }
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+}
+
+func (as *addrConnStream) finish(err error) {
+ as.mu.Lock()
+ if as.finished {
+ as.mu.Unlock()
+ return
+ }
+ as.finished = true
+ if err == io.EOF {
+ // Ending a stream with EOF indicates a success.
+ err = nil
+ }
+ if as.s != nil {
+ as.t.CloseStream(as.s, err)
+ }
+
+ if err != nil {
+ as.ac.incrCallsFailed()
+ } else {
+ as.ac.incrCallsSucceeded()
+ }
+ as.cancel()
+ as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
+type ServerStream interface {
+ // SetHeader sets the header metadata. It may be called multiple times.
+ // When called multiple times, all the provided metadata will be merged.
+ // All the metadata will be sent out when one of the following happens:
+ // - ServerStream.SendHeader() is called;
+ // - The first response is sent out;
+ // - An RPC status is sent out (error or success).
+ SetHeader(metadata.MD) error
+ // SendHeader sends the header metadata.
+ // The provided md and headers set by SetHeader() will be sent.
+ // It fails if called multiple times.
+ SendHeader(metadata.MD) error
+ // SetTrailer sets the trailer metadata which will be sent with the RPC status.
+ // When called more than once, all the provided metadata will be merged.
+ SetTrailer(metadata.MD)
+ // Context returns the context for this stream.
+ Context() context.Context
+ // SendMsg sends a message. On error, SendMsg aborts the stream and the
+ // error is returned directly.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the client. An
+ // untimely stream closure may result in lost messages.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines.
+ //
+ // It is not safe to modify the message after calling SendMsg. Tracing
+ // libraries and stats handlers may use the message lazily.
+ SendMsg(m any) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the client has performed a CloseSend. On
+ // any non-EOF error, the stream is aborted and the error contains the
+ // RPC status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m any) error
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+ ctx context.Context
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec baseCodec
+
+ cp Compressor
+ dc Decompressor
+ comp encoding.Compressor
+ decomp encoding.Compressor
+
+ sendCompressorName string
+
+ maxReceiveMessageSize int
+ maxSendMessageSize int
+ trInfo *traceInfo
+
+ statsHandler []stats.Handler
+
+ binlogs []binarylog.MethodLogger
+ // serverHeaderBinlogged indicates whether the server header has been
+ // logged. It is set when one of the following happens: stream.SendHeader(),
+ // stream.Send().
+ //
+ // It's only checked in send and sendHeader, so it doesn't need to be
+ // synchronized.
+ serverHeaderBinlogged bool
+
+ mu sync.Mutex // protects trInfo.tr after the service handler runs.
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + + // load hdr, payload, data + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) + if err != nil { + return err + } + + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + + // TODO(dfawley): should we be checking len(data) instead? + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + + if len(ss.binlogs) != 0 { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + sm := &binarylog.ServerMessage{ + Message: data.Materialize(), + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) + } + } + return nil +} + +func (ss *serverStream) RecvMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + payInfo = &payloadInfo{} + defer payInfo.free() + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { + if err == io.EOF { + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } + } + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes.Materialize(), + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. 
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil + } + // The input interface is not a prepared msg. + // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, 0, err + } + compData, pf, err := compress(data, cp, comp, pool) + if err != nil { + data.Free() + return nil, nil, nil, 0, err + } + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil +} diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go new file mode 100644 index 00000000..0037fee0 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -0,0 +1,238 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// ServerStreamingClient represents the client side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. + Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. + ClientStream +} + +// ServerStreamingServer represents the server side of a server-streaming (one +// request, many responses) RPC. It is generic over the type of the response +// message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. +type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. + Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// ClientStreamingClient represents the client side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. 
It is used in +// generated code. +type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). + Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. + CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. + ClientStream +} + +// ClientStreamingServer represents the server side of a client-streaming (many +// requests, one response) RPC. It is generic over both the type of the request +// message stream and the type of the unary response message. It is used in +// generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. +type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. + Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. + SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// BidiStreamingClient represents the client side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). + Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. + Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. 
+ ClientStream +} + +// BidiStreamingServer represents the server side of a bidirectional-streaming +// (many requests, many responses) RPC. It is generic over both the type of the +// request message stream and the type of the response message stream. It is +// used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. +type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. + Recv() (*Req, error) + + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. + Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. + ServerStream +} + +// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient, +// and BidiStreamingClient interfaces. It is used in generated code. +type GenericClientStream[Req any, Res any] struct { + ClientStream +} + +var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil) +var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) +var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil) + +// Send pushes one message into the stream of requests to be consumed by the +// server. The type of message which can be sent is determined by the Req type +// parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Send(m *Req) error { + return x.ClientStream.SendMsg(m) +} + +// Recv reads one message from the stream of responses generated by the server. +// The type of the message returned is determined by the Res type parameter +// of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) { + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// CloseAndRecv closes the sending side of the stream, then receives the unary +// response from the server. The type of message which it returns is determined +// by the Res type parameter of the GenericClientStream receiver. +func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(Res) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer, +// and BidiStreamingServer interfaces. It is used in generated code. 
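+//
+// A hedged sketch of how generated code typically wires this up (service and
+// message names invented; the exact shape depends on the protoc-gen-go-grpc
+// version):
+//
+//	func _Echo_ServerStreamingEcho_Handler(srv any, stream grpc.ServerStream) error {
+//		m := new(EchoRequest)
+//		if err := stream.RecvMsg(m); err != nil {
+//			return err
+//		}
+//		return srv.(EchoServer).ServerStreamingEcho(m,
+//			&grpc.GenericServerStream[EchoRequest, EchoResponse]{ServerStream: stream})
+//	}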
+type GenericServerStream[Req any, Res any] struct {
+ ServerStream
+}
+
+var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
+var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+
+// Send pushes one message into the stream of responses to be consumed by the
+// client. The type of message which can be sent is determined by the Res
+// type parameter of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// SendAndClose pushes the unary response to the client. The type of message
+// which can be sent is determined by the Res type parameter of the
+// GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of requests generated by the client.
+// The type of the message returned is determined by the Req type parameter
+// of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
+ m := new(Req)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
new file mode 100644
index 00000000..07f01257
--- /dev/null
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package tap defines the function handles which are executed on the transport
+// layer of gRPC-Go and related information.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package tap
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+)
+
+// Info defines the relevant information needed by the handles.
+type Info struct {
+ // FullMethodName is the string of grpc method (in the format of
+ // /package.service/method).
+ FullMethodName string
+
+ // Header contains the header metadata received.
+ Header metadata.MD
+
+ // TODO: More to be added.
+}
+
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client. If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
+//
+// It's intended to be used in situations where you don't want to waste the
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
+//
+// Note that it is executed in the per-connection I/O goroutine(s) instead of
+// per-RPC goroutine.
Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 00000000..10f4f798 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,143 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceEventLog mirrors golang.org/x/net/trace.EventLog. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceEventLog interface { + Printf(format string, a ...any) + Errorf(format string, a ...any) + Finish() +} + +// traceLog mirrors golang.org/x/net/trace.Trace. +// +// It exists in order to avoid importing x/net/trace on grpcnotrace builds. +type traceLog interface { + LazyLog(x fmt.Stringer, sensitive bool) + LazyPrintf(format string, a ...any) + SetError() + SetRecycler(f func(any)) + SetTraceInfo(traceID, spanID uint64) + SetMaxEvents(m int) + Finish() +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr traceLog + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. 
+type payload struct { + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []any +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go new file mode 100644 index 00000000..1da3a230 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_notrace.go @@ -0,0 +1,52 @@ +//go:build grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in +// turn enables binaries using gRPC-Go for dead code elimination, which can +// yield 10-15% improvements in binary size when tracing is not needed. + +import ( + "context" + "fmt" +) + +type notrace struct{} + +func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {} +func (notrace) LazyPrintf(format string, a ...any) {} +func (notrace) SetError() {} +func (notrace) SetRecycler(f func(any)) {} +func (notrace) SetTraceInfo(traceID, spanID uint64) {} +func (notrace) SetMaxEvents(m int) {} +func (notrace) Finish() {} + +func newTrace(family, title string) traceLog { + return notrace{} +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return ctx +} + +func newTraceEventLog(family, title string) traceEventLog { + return nil +} diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go new file mode 100644 index 00000000..88d6e857 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace_withtrace.go @@ -0,0 +1,39 @@ +//go:build !grpcnotrace + +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + + t "golang.org/x/net/trace" +) + +func newTrace(family, title string) traceLog { + return t.New(family, title) +} + +func newTraceContext(ctx context.Context, tr traceLog) context.Context { + return t.NewContext(ctx, tr) +} + +func newTraceEventLog(family, title string) traceEventLog { + return t.NewEventLog(family, title) +} diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 00000000..a96b6a6b --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. +const Version = "1.67.1" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 00000000..8f9e592f --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,685 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given [proto.Message]. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields and enum name values are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int +} + +// Unmarshal reads the given []byte and populates the given [proto.Message] +// using options in the UnmarshalOptions object. 
+// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. + tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...any) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + d.opts.RecursionLimit-- + if d.opts.RecursionLimit < 0 { + return errors.New("exceeded max recursion depth") + } + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. 
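// (Illustrative aside, not upstream text: an extension field declared as
// `optional int32 weight = 100;` extending a message in proto package ext
// appears in JSON under the bracketed member name shown below, which is the
// form parsed here.)
//
//	{"[ext.weight]": 42}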
+ extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + if val.IsValid() { + m.Set(fd, val) + } + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. 
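// (Illustrative examples, not upstream text: per the proto3 JSON mapping,
// an int64 field accepts both 42 and "42", and a double field additionally
// accepts the quoted strings "NaN", "Infinity", and "-Infinity". The quoted
// form preserves 64-bit precision for JavaScript producers; given a
// hypothetical message m with an int64 field big,
//
//	err := protojson.Unmarshal([]byte(`{"big": "9007199254740993"}`), m)
//
// decodes exactly a value that a bare JSON number may not represent.)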
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + if discardUnknown { + return protoreflect.Value{}, true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
+ if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + if val.IsValid() { + list.Append(val) + } + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + if pval.IsValid() { + mmap.Set(pkey, pval) + } + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
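// (Illustrative example, not upstream text: JSON object keys are always
// strings, so for a field declared as map<int32, string> counts = 1; the
// document
//
//	{"counts": {"42": "answer"}}
//
// decodes "42" into the int32 map key 42 via the parsing below.)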
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 00000000..ae71007c --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard [encoding/json] +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 00000000..0e72d853 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,380 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
+// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given [proto.Message] in JSON format using default options. +// Do not depend on the output being stable. Its output will change across +// different builds of your program, even when using the same version of the +// protobuf module. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // EmitDefaultValues specifies whether to emit default-valued primitive fields, + // empty lists, and empty maps. The fields affected are as follows: + // ╔═══════╤════════════════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════════════════╣ + // ║ false │ non-optional scalar boolean fields ║ + // ║ 0 │ non-optional scalar numeric fields ║ + // ║ "" │ non-optional scalar string/byte fields ║ + // ║ [] │ empty repeated fields ║ + // ║ {} │ empty map fields ║ + // ╚═══════╧════════════════════════════════════════╝ + // + // Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields, + // i.e. presence-sensing fields that are omitted will remain omitted to preserve + // presence-sensing. + // EmitUnpopulated takes precedence over EmitDefaultValues since the former generates + // a strict superset of the latter. + EmitDefaultValues bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. 
+	Resolver interface {
+		protoregistry.ExtensionTypeResolver
+		protoregistry.MessageTypeResolver
+	}
+}
+
+// Format formats the message as a string.
+// This method is only intended for human consumption and ignores errors.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func (o MarshalOptions) Format(m proto.Message) string {
+	if m == nil || !m.ProtoReflect().IsValid() {
+		return "" // invalid syntax, but okay since this is for debugging
+	}
+	o.AllowPartial = true
+	b, _ := o.Marshal(m)
+	return string(b)
+}
+
+// Marshal marshals the given [proto.Message] in the JSON format using options in
+// the MarshalOptions object.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
+func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
+	return o.marshal(nil, m)
+}
+
+// MarshalAppend appends the JSON format encoding of m to b,
+// returning the result.
+func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) {
+	return o.marshal(b, m)
+}
+
+// marshal is a centralized function that all marshal operations go through.
+// For profiling purposes, avoid changing the name of this function or
+// introducing other code paths for marshal that do not go through this.
+func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) {
+	if o.Multiline && o.Indent == "" {
+		o.Indent = defaultIndent
+	}
+	if o.Resolver == nil {
+		o.Resolver = protoregistry.GlobalTypes
+	}
+
+	internalEnc, err := json.NewEncoder(b, o.Indent)
+	if err != nil {
+		return nil, err
+	}
+
+	// Treat nil message interface as an empty message,
+	// in which case the output is an empty JSON object.
+	if m == nil {
+		return append(b, '{', '}'), nil
+	}
+
+	enc := encoder{internalEnc, o}
+	if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil {
+		return nil, err
+	}
+	if o.AllowPartial {
+		return enc.Bytes(), nil
+	}
+	return enc.Bytes(), proto.CheckInitialized(m)
+}
+
+type encoder struct {
+	*json.Encoder
+	opts MarshalOptions
+}
+
+// typeFieldDesc is a synthetic field descriptor used for the "@type" field.
+var typeFieldDesc = func() protoreflect.FieldDescriptor {
+	var fd filedesc.Field
+	fd.L0.FullName = "@type"
+	fd.L0.Index = -1
+	fd.L1.Cardinality = protoreflect.Optional
+	fd.L1.Kind = protoreflect.StringKind
+	return &fd
+}()
+
+// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method
+// to additionally iterate over a synthetic field for the type URL.
+type typeURLFieldRanger struct {
+	order.FieldRanger
+	typeURL string
+}
+
+func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) {
+		return
+	}
+	m.FieldRanger.Range(f)
+}
+
+// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
+// method to additionally iterate over unpopulated fields.
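// (Illustrative behavior, not upstream text: for a proto3 message m with an
// unset singular string field name,
//
//	b, _ := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(m)
//
// yields {"name":""} (modulo whitespace), whereas the zero MarshalOptions
// omit the field entirely.)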
+type unpopulatedFieldRanger struct { + protoreflect.Message + + skipNull bool +} + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + if fd.HasPresence() { + if m.skipNull { + continue + } + v = protoreflect.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. +func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + switch { + case e.opts.EmitUnpopulated: + fields = unpopulatedFieldRanger{Message: m, skipNull: false} + case e.opts.EmitDefaultValues: + fields = unpopulatedFieldRanger{Message: m, skipNull: true} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + e.WriteUint(val.Uint()) + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. 
+ e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. +func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 00000000..4b177c82 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,876 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
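// (Illustrative example, not upstream text: well-known types serialize to
// compact JSON forms rather than ordinary objects. Assuming the timestamppb
// package,
//
//	ts := timestamppb.New(time.Unix(0, 0))
//	b, _ := protojson.Marshal(ts) // the JSON string "1970-01-01T00:00:00Z"
//
// exercises the Timestamp marshaler dispatched here.)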
+func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, protoreflect.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. 
+ typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. + start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. 
+ Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + var open int + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose, json.ArrayClose: + open-- + case json.ObjectOpen, json.ArrayOpen: + open++ + if open > d.opts.RecursionLimit { + return errors.New("exceeded max recursion depth") + } + case json.EOF: + // This can only happen if there's a bug in Decoder.Read. + // Avoid an infinite loop if this does happen. + return errors.New("unexpected EOF") + } + if open == 0 { + return nil + } + } +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. 
+ d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. + +func (e encoder) marshalEmpty(protoreflect.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
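// (Illustrative example, not upstream text: the dynamic Value type
// round-trips arbitrary JSON. Assuming the structpb well-known package,
//
//	v, _ := structpb.NewValue(map[string]any{"a": []any{1, "x", nil}})
//	b, _ := protojson.Marshal(v) // {"a":[1,"x",null]}, modulo whitespace
//
// this exercises the struct_value, list_value, number_value, string_value,
// and null_value oneof cases handled below.)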
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
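// (Illustrative example, not upstream text: assuming the durationpb
// well-known package,
//
//	d := durationpb.New(1500 * time.Millisecond)
//	b, _ := protojson.Marshal(d) // the JSON string "1.500s"
//
// matches the 0-, 3-, 6-, or 9-fractional-digit output described above.)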
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
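+	// time.Parse silently truncates fractional seconds longer than 9 digits,
+	// so explicitly reject any input with more than 9 digits between the '.'
+	// and the timezone.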
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c..024ffebd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. 
var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go index 029a6a12..08dad769 100644 --- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go +++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go @@ -5,7 +5,7 @@ // Package editionssupport defines constants for editions that are supported. package editionssupport -import descriptorpb "google.golang.org/protobuf/types/descriptorpb" +import "google.golang.org/protobuf/types/descriptorpb" const ( Minimum = descriptorpb.Edition_EDITION_PROTO2 diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 00000000..ea1d3e65 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. 
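+//
+// For example, the input `{"a": [true]}` reads as the token sequence
+// ObjectOpen, Name, ArrayOpen, Bool, ArrayClose, ObjectClose, EOF; comma
+// tokens are validated internally and never returned to the caller.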
+func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind&(Name|comma) != 0 || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
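+	// (consume(0) advances past leading whitespace without consuming any
+	// token bytes.)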
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
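+//
+// For example, a value may start the input, follow a Name inside an object,
+// or follow an ArrayOpen or comma inside an array.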
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-length of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 00000000..2999d713 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
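+	// For example, "10}" and "10," stop cleanly at the delimiter, while
+	// "10x" is rejected outright.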
+	if n < len(input) && isNotDelim(input[n]) {
+		return 0, false
+	}
+
+	return n, true
+}
+
+// numberParts is the result of parsing out a valid JSON number. It contains
+// the parts of a number. The parts are used for integer conversion.
+type numberParts struct {
+	neg  bool
+	intp []byte
+	frac []byte
+	exp  []byte
+}
+
+// parseNumberParts constructs numberParts from the given []byte. The logic
+// here is similar to parseNumber above, with the difference of having to
+// construct numberParts. The slice fields in numberParts are subslices of
+// the input.
+func parseNumberParts(input []byte) (numberParts, bool) {
+	var neg bool
+	var intp []byte
+	var frac []byte
+	var exp []byte
+
+	s := input
+	if len(s) == 0 {
+		return numberParts{}, false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+		if len(s) == 0 {
+			return numberParts{}, false
+		}
+	}
+
+	// Digits
+	switch {
+	case s[0] == '0':
+		// Skip first 0 and no need to store.
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		intp = s
+		n := 1
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		intp = intp[:n]
+
+	default:
+		return numberParts{}, false
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		frac = s[1:]
+		n := 1
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		frac = frac[:n]
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		exp = s
+		n := 0
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			n++
+			if len(s) == 0 {
+				return numberParts{}, false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+			n++
+		}
+		exp = exp[:n]
+	}
+
+	return numberParts{
+		neg:  neg,
+		intp: intp,
+		frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right.
+		exp:  exp,
+	}, true
+}
+
+// normalizeToIntString returns an integer string in normal form without the
+// E-notation for the given numberParts. It will return false if it is not an
+// integer or if the exponent exceeds the max/min int value.
+func normalizeToIntString(n numberParts) (string, bool) {
+	intpSize := len(n.intp)
+	fracSize := len(n.frac)
+
+	if intpSize == 0 && fracSize == 0 {
+		return "0", true
+	}
+
+	var exp int
+	if len(n.exp) > 0 {
+		i, err := strconv.ParseInt(string(n.exp), 10, 32)
+		if err != nil {
+			return "", false
+		}
+		exp = int(i)
+	}
+
+	var num []byte
+	if exp >= 0 {
+		// For positive E, shift fraction digits into integer part and also pad
+		// with zeroes as needed.
+
+		// If there are more digits in the fraction than the E value, then the
+		// number is not an integer.
+		if fracSize > exp {
+			return "", false
+		}
+
+		// Make sure resulting digits are within max value limit to avoid
+		// unnecessarily constructing a large byte slice that may simply fail
+		// later on.
+		const maxDigits = 20 // Max uint64 value has 20 decimal digits.
+		if intpSize+exp > maxDigits {
+			return "", false
+		}
+
+		// Set cap to make a copy of integer part when appended.
+		num = n.intp[:len(n.intp):len(n.intp)]
+		num = append(num, n.frac...)
+		for i := 0; i < exp-fracSize; i++ {
+			num = append(num, '0')
+		}
+	} else {
+		// For negative E, shift digits in integer part out.
+
+		// If there are fractions, then the number is not an integer.
+		if fracSize > 0 {
+			return "", false
+		}
+
+		// index is where the decimal point will be after adjusting for negative
+		// exponent.
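+		// For example, intp="15" with exp=-1 (i.e. 1.5) fails below because
+		// the shifted-out digit '5' is non-zero, while intp="150" with
+		// exp=-1 (i.e. 15) succeeds with num="15".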
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 00000000..f7fea7d8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) }
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
new file mode 100644
index 00000000..50578d65
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go
@@ -0,0 +1,192 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// Kind represents a token kind expressible in the JSON format.
+type Kind uint16
+
+const (
+	Invalid Kind = (1 << iota) / 2
+	EOF
+	Null
+	Bool
+	Number
+	String
+	Name
+	ObjectOpen
+	ObjectClose
+	ArrayOpen
+	ArrayClose
+
+	// comma is only for parsing in between tokens and
+	// does not need to be exported.
+	comma
)

+func (k Kind) String() string {
+	switch k {
+	case EOF:
+		return "eof"
+	case Null:
+		return "null"
+	case Bool:
+		return "bool"
+	case Number:
+		return "number"
+	case String:
+		return "string"
+	case ObjectOpen:
+		return "{"
+	case ObjectClose:
+		return "}"
+	case Name:
+		return "name"
+	case ArrayOpen:
+		return "["
+	case ArrayClose:
+		return "]"
+	case comma:
+		return ","
+	}
+	return ""
+}
+
+// Token provides a parsed token kind and value.
+//
+// Values are provided by the different accessor methods. The accessor methods
+// Name, Bool, and ParsedString will panic if called on the wrong kind. There
+// are different accessor methods for the Number kind for converting to the
+// appropriate Go numeric type and those methods have the ok return value.
+type Token struct {
+	// Token kind.
+	kind Kind
+	// pos provides the position of the token in the original input.
+	pos int
+	// raw bytes of the serialized token.
+	// This is a subslice into the original input.
+	raw []byte
+	// boo is the parsed boolean value.
+	boo bool
+	// str is the parsed string value.
+	str string
+}
+
+// Kind returns the token kind.
+func (t Token) Kind() Kind {
+	return t.kind
+}
+
+// RawString returns the read value as a string.
+func (t Token) RawString() string {
+	return string(t.raw)
+}
+
+// Pos returns the token position from the input.
+func (t Token) Pos() int {
+	return t.pos
+}
+
+// Name returns the object name if token is Name, else it panics.
+func (t Token) Name() string {
+	if t.kind == Name {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a Name: %v", t.RawString()))
+}
+
+// Bool returns the bool value if token kind is Bool, else it panics.
+func (t Token) Bool() bool {
+	if t.kind == Bool {
+		return t.boo
+	}
+	panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString()))
+}
+
+// ParsedString returns the string value if the token kind is String, else it
+// panics.
+func (t Token) ParsedString() string {
+	if t.kind == String {
+		return t.str
+	}
+	panic(fmt.Sprintf("Token is not a String: %v", t.RawString()))
+}
+
+// Float returns the floating-point number if token kind is Number.
+//
+// The floating-point precision is specified by the bitSize parameter: 32 for
+// float32 or 64 for float64. If bitSize=32, the result still has type float64,
+// but it will be convertible to float32 without changing its value. It will
+// return false if the number exceeds the floating point limits for given
+// bitSize.
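+//
+// For example, a Number token for "1e2" yields (100, true) at either bitSize,
+// while "1e39" exceeds the float32 range and yields (0, false) at bitSize 32.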
+func (t Token) Float(bitSize int) (float64, bool) {
+	if t.kind != Number {
+		return 0, false
+	}
+	f, err := strconv.ParseFloat(t.RawString(), bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
+}
+
+// Int returns the signed integer number if token is Number.
+//
+// The given bitSize specifies the integer type that the result must fit into.
+// It returns false if the number is not an integer value or if the result
+// exceeds the limits for given bitSize.
+func (t Token) Int(bitSize int) (int64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+// Uint returns the unsigned integer number if token is Number.
+//
+// The given bitSize specifies the unsigned integer type that the result must
+// fit into. It returns false if the number is not an unsigned integer value
+// or if the result exceeds the limits for given bitSize.
+func (t Token) Uint(bitSize int) (uint64, bool) {
+	s, ok := t.getIntStr()
+	if !ok {
+		return 0, false
+	}
+	n, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return 0, false
+	}
+	return n, true
+}
+
+func (t Token) getIntStr() (string, bool) {
+	if t.kind != Number {
+		return "", false
+	}
+	parts, ok := parseNumberParts(t.raw)
+	if !ok {
+		return "", false
+	}
+	return normalizeToIntString(parts)
+}
+
+// TokenEquals returns true if given Tokens are equal, else false.
+func TokenEquals(x, y Token) bool {
+	return x.kind == y.kind &&
+		x.pos == y.pos &&
+		bytes.Equal(x.raw, y.raw) &&
+		x.boo == y.boo &&
+		x.str == y.str
+}
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
new file mode 100644
index 00000000..934f2dcb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go
@@ -0,0 +1,278 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"math"
+	"math/bits"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/detrand"
+	"google.golang.org/protobuf/internal/errors"
+)
+
+// kind represents an encoding type.
+type kind uint8
+
+const (
+	_ kind = (1 << iota) / 2
+	name
+	scalar
+	objectOpen
+	objectClose
+	arrayOpen
+	arrayClose
+)
+
+// Encoder provides methods to write out JSON constructs and values. The user is
+// responsible for producing valid sequences of JSON constructs and values.
+type Encoder struct {
+	indent   string
+	lastKind kind
+	indents  []byte
+	out      []byte
+}
+
+// NewEncoder returns an Encoder.
+//
+// If indent is a non-empty string, it causes every entry for an Array or Object
+// to be preceded by the indent and trailed by a newline.
+func NewEncoder(buf []byte, indent string) (*Encoder, error) {
+	e := &Encoder{
+		out: buf,
+	}
+	if len(indent) > 0 {
+		if strings.Trim(indent, " \t") != "" {
+			return nil, errors.New("indent may only be composed of space or tab characters")
+		}
+		e.indent = indent
+	}
+	return e, nil
+}
+
+// Bytes returns the content of the written bytes.
+func (e *Encoder) Bytes() []byte {
+	return e.out
+}
+
+// WriteNull writes out the null value.
+func (e *Encoder) WriteNull() {
+	e.prepareNext(scalar)
+	e.out = append(e.out, "null"...)
+}
+
+// WriteBool writes out the given boolean value.
+func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. 
+func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. 
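+		// (detrand is seeded per binary, so the spacing is stable within a
+		// build but varies across builds; callers must not depend on
+		// byte-exact output.)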
+ if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40..fa790e0f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b..d2f54949 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8..67a51b32 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356..fd4d0c83 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func 
unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd012..d9b9d916 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b..7f67cbb6 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02f..bef5a25f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b8..9404270d 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. 
const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a2..0d5b546e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e4..7c1f66c8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb7..78be9df3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577b..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e2..077712c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55..f72ddd88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122..6254f5de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000..9f6c32a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
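+	// For example, an extension field holding an empty list compares equal
+	// to that extension being entirely absent from the other message.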
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have; we need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Can't be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalMessage instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalValue will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee..b6849d66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d4..741b5ed2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. 
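// Editorial sketch, not part of the vendored diff: the new equal.go above is
// the generated-message fast path behind proto.Equal, and its observable
// semantics are unchanged. A small self-contained illustration (wrapperspb is
// used only to have a generated message type at hand):
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a, b := wrapperspb.String("hi"), wrapperspb.String("hi")
	fmt.Println(proto.Equal(a, b))                        // true: same descriptor, same field values
	fmt.Println(proto.Equal(a, wrapperspb.String("bye"))) // false: field values differ
	var na, nb *wrapperspb.StringValue
	fmt.Println(proto.Equal(na, nb)) // true: two invalid messages of the same type are equal
}

// Note the extension handling above: an extension whose value is an empty
// list compares equal to the extension being absent entirely.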
-func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() 
*map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d..79e18666 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0..832a7988 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2b..1ffddf68 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f68..fb8e15e8 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b0..c36d4a9c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f292..78445d11 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. 
// It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
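// Editorial sketch, not part of the vendored diff: the GetExtension and
// SetExtension documentation above recommends an immediate type assertion on
// the returned value. foopb, E_MyExtension, and MyMessage are the
// hypothetical names used by that documentation, not a real package:
//
//	func useExtension(m proto.Message) {
//		proto.SetExtension(m, foopb.E_MyExtension, &foopb.MyMessage{})
//		if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
//			_ = mm // non-nil exactly when proto.HasExtension(m, foopb.E_MyExtension) reports true
//		}
//	}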
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index 85617554..ebcb4a8a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -150,6 +150,7 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } f.L1.IsWeak = opts.GetWeak() + f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() } @@ -214,6 +215,9 @@ func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescript if xd.JsonName != nil { x.L2.StringName.InitJSON(xd.GetJsonName()) } + if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded { + x.L1.Kind = protoreflect.GroupKind + } } return xs, nil } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index 804830ed..002e0047 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" - gofeaturespb "google.golang.org/protobuf/types/gofeaturespb" + "google.golang.org/protobuf/types/gofeaturespb" ) var defaults = &descriptorpb.FeatureSetDefaults{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6e..742cb518 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2a..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. 
This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ea..0015fcb3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go index f7d38699..479527b5 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 44cf467d..24615656 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -39,6 +39,9 @@ type Methods = struct { // CheckInitialized returns an error if any required fields in the message are not set. CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) + + // Equal compares two messages and returns EqualOutput.Equal == true if they are equal. + Equal func(EqualInput) EqualOutput } // SupportFlags indicate support for optional features. @@ -166,3 +169,18 @@ type CheckInitializedInput = struct { type CheckInitializedOutput = struct { pragma.NoUnkeyedLiterals } + +// EqualInput is input to the Equal method. +type EqualInput = struct { + pragma.NoUnkeyedLiterals + + MessageA protoreflect.Message + MessageB protoreflect.Message +} + +// EqualOutput is output from the Equal method. 
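// Editorial sketch, not part of the vendored diff: the Equal entry added to
// protoiface.Methods below is an optional fast path. Despite the comment in
// the proto/equal.go hunk above, the dispatch as written takes the fast path
// only when both messages advertise Equal; otherwise proto.Equal falls back
// to the reflection-based comparison. Direct use looks roughly like this
// (assumes imports "google.golang.org/protobuf/proto" and
// "google.golang.org/protobuf/runtime/protoiface"); ordinary code should
// simply call proto.Equal.
func fastEqual(x, y proto.Message) (equal, ok bool) {
	mx, my := x.ProtoReflect(), y.ProtoReflect()
	pmx, pmy := mx.ProtoMethods(), my.ProtoMethods()
	if pmx == nil || pmy == nil || pmx.Equal == nil || pmy.Equal == nil {
		return false, false // no fast path available for this pair
	}
	out := pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my})
	return out.Equal, true
}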
+type EqualOutput = struct { + pragma.NoUnkeyedLiterals + + Equal bool +} diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index 9403eb07..6dea75cd 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -1217,11 +1217,9 @@ type FileDescriptorSet struct { func (x *FileDescriptorSet) Reset() { *x = FileDescriptorSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorSet) String() string { @@ -1232,7 +1230,7 @@ func (*FileDescriptorSet) ProtoMessage() {} func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1291,11 +1289,9 @@ type FileDescriptorProto struct { func (x *FileDescriptorProto) Reset() { *x = FileDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileDescriptorProto) String() string { @@ -1306,7 +1302,7 @@ func (*FileDescriptorProto) ProtoMessage() {} func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1434,11 +1430,9 @@ type DescriptorProto struct { func (x *DescriptorProto) Reset() { *x = DescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto) String() string { @@ -1449,7 +1443,7 @@ func (*DescriptorProto) ProtoMessage() {} func (x *DescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1561,11 +1555,9 @@ const ( func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions) String() string { @@ -1576,7 +1568,7 @@ func (*ExtensionRangeOptions) ProtoMessage() {} func (x *ExtensionRangeOptions) ProtoReflect() 
protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1680,11 +1672,9 @@ type FieldDescriptorProto struct { func (x *FieldDescriptorProto) Reset() { *x = FieldDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldDescriptorProto) String() string { @@ -1695,7 +1685,7 @@ func (*FieldDescriptorProto) ProtoMessage() {} func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1799,11 +1789,9 @@ type OneofDescriptorProto struct { func (x *OneofDescriptorProto) Reset() { *x = OneofDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofDescriptorProto) String() string { @@ -1814,7 +1802,7 @@ func (*OneofDescriptorProto) ProtoMessage() {} func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1863,11 +1851,9 @@ type EnumDescriptorProto struct { func (x *EnumDescriptorProto) Reset() { *x = EnumDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto) String() string { @@ -1878,7 +1864,7 @@ func (*EnumDescriptorProto) ProtoMessage() {} func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1941,11 +1927,9 @@ type EnumValueDescriptorProto struct { func (x *EnumValueDescriptorProto) Reset() { *x = EnumValueDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueDescriptorProto) String() string { @@ -1956,7 +1940,7 @@ func (*EnumValueDescriptorProto) ProtoMessage() {} func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[7] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2005,11 +1989,9 @@ type ServiceDescriptorProto struct { func (x *ServiceDescriptorProto) Reset() { *x = ServiceDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ServiceDescriptorProto) String() string { @@ -2020,7 +2002,7 @@ func (*ServiceDescriptorProto) ProtoMessage() {} func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2082,11 +2064,9 @@ const ( func (x *MethodDescriptorProto) Reset() { *x = MethodDescriptorProto{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodDescriptorProto) String() string { @@ -2097,7 +2077,7 @@ func (*MethodDescriptorProto) ProtoMessage() {} func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2267,11 +2247,9 @@ const ( func (x *FileOptions) Reset() { *x = FileOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FileOptions) String() string { @@ -2282,7 +2260,7 @@ func (*FileOptions) ProtoMessage() {} func (x *FileOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2534,11 +2512,9 @@ const ( func (x *MessageOptions) Reset() { *x = MessageOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MessageOptions) String() string { @@ -2549,7 +2525,7 @@ func (*MessageOptions) ProtoMessage() {} func (x *MessageOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2707,11 +2683,9 @@ const ( func (x *FieldOptions) Reset() { *x = 
FieldOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions) String() string { @@ -2722,7 +2696,7 @@ func (*FieldOptions) ProtoMessage() {} func (x *FieldOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2849,11 +2823,9 @@ type OneofOptions struct { func (x *OneofOptions) Reset() { *x = OneofOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OneofOptions) String() string { @@ -2864,7 +2836,7 @@ func (*OneofOptions) ProtoMessage() {} func (x *OneofOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2929,11 +2901,9 @@ const ( func (x *EnumOptions) Reset() { *x = EnumOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumOptions) String() string { @@ -2944,7 +2914,7 @@ func (*EnumOptions) ProtoMessage() {} func (x *EnumOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3026,11 +2996,9 @@ const ( func (x *EnumValueOptions) Reset() { *x = EnumValueOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumValueOptions) String() string { @@ -3041,7 +3009,7 @@ func (*EnumValueOptions) ProtoMessage() {} func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3115,11 +3083,9 @@ const ( func (x *ServiceOptions) Reset() { *x = ServiceOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *ServiceOptions) String() string { @@ -3130,7 +3096,7 @@ func (*ServiceOptions) ProtoMessage() {} func (x *ServiceOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3192,11 +3158,9 @@ const ( func (x *MethodOptions) Reset() { *x = MethodOptions{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MethodOptions) String() string { @@ -3207,7 +3171,7 @@ func (*MethodOptions) ProtoMessage() {} func (x *MethodOptions) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3274,11 +3238,9 @@ type UninterpretedOption struct { func (x *UninterpretedOption) Reset() { *x = UninterpretedOption{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption) String() string { @@ -3289,7 +3251,7 @@ func (*UninterpretedOption) ProtoMessage() {} func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3375,11 +3337,9 @@ type FeatureSet struct { func (x *FeatureSet) Reset() { *x = FeatureSet{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSet) String() string { @@ -3390,7 +3350,7 @@ func (*FeatureSet) ProtoMessage() {} func (x *FeatureSet) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3467,11 +3427,9 @@ type FeatureSetDefaults struct { func (x *FeatureSetDefaults) Reset() { *x = FeatureSetDefaults{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults) String() string { @@ -3482,7 +3440,7 @@ func (*FeatureSetDefaults) ProtoMessage() {} func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3578,11 +3536,9 @@ type SourceCodeInfo struct { func (x *SourceCodeInfo) Reset() { *x = SourceCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo) String() string { @@ -3593,7 +3549,7 @@ func (*SourceCodeInfo) ProtoMessage() {} func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3630,11 +3586,9 @@ type GeneratedCodeInfo struct { func (x *GeneratedCodeInfo) Reset() { *x = GeneratedCodeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo) String() string { @@ -3645,7 +3599,7 @@ func (*GeneratedCodeInfo) ProtoMessage() {} func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3679,11 +3633,9 @@ type DescriptorProto_ExtensionRange struct { func (x *DescriptorProto_ExtensionRange) Reset() { *x = DescriptorProto_ExtensionRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ExtensionRange) String() string { @@ -3694,7 +3646,7 @@ func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3744,11 +3696,9 @@ type DescriptorProto_ReservedRange struct { func (x *DescriptorProto_ReservedRange) Reset() { *x = DescriptorProto_ReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescriptorProto_ReservedRange) String() string { @@ -3759,7 +3709,7 @@ func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { mi := 
&file_google_protobuf_descriptor_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3813,11 +3763,9 @@ type ExtensionRangeOptions_Declaration struct { func (x *ExtensionRangeOptions_Declaration) Reset() { *x = ExtensionRangeOptions_Declaration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ExtensionRangeOptions_Declaration) String() string { @@ -3828,7 +3776,7 @@ func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3895,11 +3843,9 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnumDescriptorProto_EnumReservedRange) String() string { @@ -3910,7 +3856,7 @@ func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3950,11 +3896,9 @@ type FieldOptions_EditionDefault struct { func (x *FieldOptions_EditionDefault) Reset() { *x = FieldOptions_EditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_EditionDefault) String() string { @@ -3965,7 +3909,7 @@ func (*FieldOptions_EditionDefault) ProtoMessage() {} func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4018,11 +3962,9 @@ type FieldOptions_FeatureSupport struct { func (x *FieldOptions_FeatureSupport) Reset() { *x = FieldOptions_FeatureSupport{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FieldOptions_FeatureSupport) 
String() string { @@ -4033,7 +3975,7 @@ func (*FieldOptions_FeatureSupport) ProtoMessage() {} func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4092,11 +4034,9 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UninterpretedOption_NamePart) String() string { @@ -4107,7 +4047,7 @@ func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4154,11 +4094,9 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct { func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() { *x = FeatureSetDefaults_FeatureSetEditionDefault{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string { @@ -4169,7 +4107,7 @@ func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {} func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4305,11 +4243,9 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_descriptor_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SourceCodeInfo_Location) String() string { @@ -4320,7 +4256,7 @@ func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4392,11 +4328,9 @@ type GeneratedCodeInfo_Annotation struct { func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_google_protobuf_descriptor_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GeneratedCodeInfo_Annotation) String() string { @@ -4407,7 +4341,7 @@ func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_descriptor_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5385,424 +5319,6 @@ func file_google_protobuf_descriptor_proto_init() { if File_google_protobuf_descriptor_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*FileDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*FieldDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*OneofDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*ServiceDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*MethodDescriptorProto); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*FileOptions); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*MessageOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*OneofOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*EnumOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*EnumValueOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*ServiceOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*MethodOptions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - case 3: - return &v.extensionFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ExtensionRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*DescriptorProto_ReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*ExtensionRangeOptions_Declaration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_EditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*FieldOptions_FeatureSupport); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*UninterpretedOption_NamePart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*SourceCodeInfo_Location); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_google_protobuf_descriptor_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*GeneratedCodeInfo_Annotation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go new file mode 100644 index 00000000..1ba1dfa5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go @@ -0,0 +1,718 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dynamicpb creates protocol buffer messages using runtime type information. 
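+//
+// A minimal usage sketch (assuming the proto, protoreflect, and descriptorpb
+// packages are imported; FileDescriptorProto stands in for an arbitrary
+// message type):
+//
+//	md := (&descriptorpb.FileDescriptorProto{}).ProtoReflect().Descriptor()
+//	m := dynamicpb.NewMessage(md)
+//	m.Set(md.Fields().ByName("name"), protoreflect.ValueOfString("example.proto"))
+//	b, err := proto.Marshal(m) // wire-compatible with FileDescriptorProto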
+package dynamicpb + +import ( + "math" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// enum is a dynamic protoreflect.Enum. +type enum struct { + num protoreflect.EnumNumber + typ protoreflect.EnumType +} + +func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() } +func (e enum) Type() protoreflect.EnumType { return e.typ } +func (e enum) Number() protoreflect.EnumNumber { return e.num } + +// enumType is a dynamic protoreflect.EnumType. +type enumType struct { + desc protoreflect.EnumDescriptor +} + +// NewEnumType creates a new EnumType with the provided descriptor. +// +// EnumTypes created by this package are equal if their descriptors are equal. +// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2). +// +// Enum values created by the EnumType are equal if their numbers are equal. +func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType { + return enumType{desc} +} + +func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} } +func (et enumType) Descriptor() protoreflect.EnumDescriptor { return et.desc } + +// extensionType is a dynamic protoreflect.ExtensionType. +type extensionType struct { + desc extensionTypeDescriptor +} + +// A Message is a dynamically constructed protocol buffer message. +// +// Message implements the [google.golang.org/protobuf/proto.Message] interface, +// and may be used with all standard proto package functions +// such as Marshal, Unmarshal, and so forth. +// +// Message also implements the [protoreflect.Message] interface. +// See the [protoreflect] package documentation for that interface for how to +// get and set fields and otherwise interact with the contents of a Message. +// +// Reflection API functions which construct messages, such as NewField, +// return new dynamic messages of the appropriate type. Functions which take +// messages, such as Set for a message-value field, will accept any message +// with a compatible type. +// +// Operations which modify a Message are not safe for concurrent use. +type Message struct { + typ messageType + known map[protoreflect.FieldNumber]protoreflect.Value + ext map[protoreflect.FieldNumber]protoreflect.FieldDescriptor + unknown protoreflect.RawFields +} + +var ( + _ protoreflect.Message = (*Message)(nil) + _ protoreflect.ProtoMessage = (*Message)(nil) + _ protoiface.MessageV1 = (*Message)(nil) +) + +// NewMessage creates a new message with the provided descriptor. +func NewMessage(desc protoreflect.MessageDescriptor) *Message { + return &Message{ + typ: messageType{desc}, + known: make(map[protoreflect.FieldNumber]protoreflect.Value), + ext: make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor), + } +} + +// ProtoMessage implements the legacy message interface. +func (m *Message) ProtoMessage() {} + +// ProtoReflect implements the [protoreflect.ProtoMessage] interface. +func (m *Message) ProtoReflect() protoreflect.Message { + return m +} + +// String returns a string representation of a message. +func (m *Message) String() string { + return protoimpl.X.MessageStringOf(m) +} + +// Reset clears the message to be empty, but preserves the dynamic message type. 
+func (m *Message) Reset() { + m.known = make(map[protoreflect.FieldNumber]protoreflect.Value) + m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor) + m.unknown = nil +} + +// Descriptor returns the message descriptor. +func (m *Message) Descriptor() protoreflect.MessageDescriptor { + return m.typ.desc +} + +// Type returns the message type. +func (m *Message) Type() protoreflect.MessageType { + return m.typ +} + +// New returns a newly allocated empty message with the same descriptor. +// See [protoreflect.Message] for details. +func (m *Message) New() protoreflect.Message { + return m.Type().New() +} + +// Interface returns the message. +// See [protoreflect.Message] for details. +func (m *Message) Interface() protoreflect.ProtoMessage { + return m +} + +// ProtoMethods is an internal detail of the [protoreflect.Message] interface. +// Users should never call this directly. +func (m *Message) ProtoMethods() *protoiface.Methods { + return nil +} + +// Range visits every populated field in undefined order. +// See [protoreflect.Message] for details. +func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + for num, v := range m.known { + fd := m.ext[num] + if fd == nil { + fd = m.Descriptor().Fields().ByNumber(num) + } + if !isSet(fd, v) { + continue + } + if !f(fd, v) { + return + } + } +} + +// Has reports whether a field is populated. +// See [protoreflect.Message] for details. +func (m *Message) Has(fd protoreflect.FieldDescriptor) bool { + m.checkField(fd) + if fd.IsExtension() && m.ext[fd.Number()] != fd { + return false + } + v, ok := m.known[fd.Number()] + if !ok { + return false + } + return isSet(fd, v) +} + +// Clear clears a field. +// See [protoreflect.Message] for details. +func (m *Message) Clear(fd protoreflect.FieldDescriptor) { + m.checkField(fd) + num := fd.Number() + delete(m.known, num) + delete(m.ext, num) +} + +// Get returns the value of a field. +// See [protoreflect.Message] for details. +func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.checkField(fd) + num := fd.Number() + if fd.IsExtension() { + if fd != m.ext[num] { + return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero() + } + return m.known[num] + } + if v, ok := m.known[num]; ok { + switch { + case fd.IsMap(): + if v.Map().Len() > 0 { + return v + } + case fd.IsList(): + if v.List().Len() > 0 { + return v + } + default: + return v + } + } + switch { + case fd.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: fd}) + case fd.IsList(): + return protoreflect.ValueOfList(emptyList{desc: fd}) + case fd.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}}) + case fd.Kind() == protoreflect.BytesKind: + return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...)) + default: + return fd.Default() + } +} + +// Mutable returns a mutable reference to a repeated, map, or message field. +// See [protoreflect.Message] for details. 
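+//
+// A minimal sketch, assuming the descriptor in use has a message-typed field
+// named "options" (the field name is hypothetical):
+//
+//	fd := m.Descriptor().Fields().ByName("options")
+//	sub := m.Mutable(fd).Message() // allocates the sub-message if unset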
+func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	if !fd.IsMap() && !fd.IsList() && fd.Message() == nil {
+		panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName()))
+	}
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", fd.FullName()))
+	}
+	num := fd.Number()
+	if fd.IsExtension() {
+		if fd != m.ext[num] {
+			m.ext[num] = fd
+			m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+		}
+		return m.known[num]
+	}
+	if v, ok := m.known[num]; ok {
+		return v
+	}
+	m.clearOtherOneofFields(fd)
+	m.known[num] = m.NewField(fd)
+	if fd.IsExtension() {
+		m.ext[num] = fd
+	}
+	return m.known[num]
+}
+
+// Set stores a value in a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+	m.checkField(fd)
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", fd.FullName()))
+	}
+	if fd.IsExtension() {
+		isValid := true
+		switch {
+		case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v):
+			isValid = false
+		case fd.IsList():
+			isValid = v.List().IsValid()
+		case fd.IsMap():
+			isValid = v.Map().IsValid()
+		case fd.Message() != nil:
+			isValid = v.Message().IsValid()
+		}
+		if !isValid {
+			panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()))
+		}
+		m.ext[fd.Number()] = fd
+	} else {
+		typecheck(fd, v)
+	}
+	m.clearOtherOneofFields(fd)
+	m.known[fd.Number()] = v
+}
+
+func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) {
+	od := fd.ContainingOneof()
+	if od == nil {
+		return
+	}
+	num := fd.Number()
+	for i := 0; i < od.Fields().Len(); i++ {
+		if n := od.Fields().Get(i).Number(); n != num {
+			delete(m.known, n)
+		}
+	}
+}
+
+// NewField returns a new value assignable to the field of a given descriptor.
+// See [protoreflect.Message] for details.
+func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	switch {
+	case fd.IsExtension():
+		return fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+	case fd.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{
+			desc: fd,
+			mapv: make(map[any]protoreflect.Value),
+		})
+	case fd.IsList():
+		return protoreflect.ValueOfList(&dynamicList{desc: fd})
+	case fd.Message() != nil:
+		return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+	default:
+		return fd.Default()
+	}
+}
+
+// WhichOneof reports which field in a oneof is populated, returning nil if none are populated.
+// See [protoreflect.Message] for details.
+func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+	for i := 0; i < od.Fields().Len(); i++ {
+		fd := od.Fields().Get(i)
+		if m.Has(fd) {
+			return fd
+		}
+	}
+	return nil
+}
+
+// GetUnknown returns the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) GetUnknown() protoreflect.RawFields {
+	return m.unknown
+}
+
+// SetUnknown sets the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) SetUnknown(r protoreflect.RawFields) {
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName()))
+	}
+	m.unknown = r
+}
+
+// IsValid reports whether the message is valid.
+// See [protoreflect.Message] for details.
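+//
+// For a dynamic Message this is effectively a writability check: a Message
+// obtained from NewMessage reports valid, while the read-only zero message
+// returned by a type's Zero method reports invalid.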
+func (m *Message) IsValid() bool { + return m.known != nil +} + +func (m *Message) checkField(fd protoreflect.FieldDescriptor) { + if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() { + if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok { + panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName())) + } + return + } + if fd.Parent() == m.Descriptor() { + return + } + fields := m.Descriptor().Fields() + index := fd.Index() + if index >= fields.Len() || fields.Get(index) != fd { + panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName())) + } +} + +type messageType struct { + desc protoreflect.MessageDescriptor +} + +// NewMessageType creates a new MessageType with the provided descriptor. +// +// MessageTypes created by this package are equal if their descriptors are equal. +// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2). +func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType { + return messageType{desc} +} + +func (mt messageType) New() protoreflect.Message { return NewMessage(mt.desc) } +func (mt messageType) Zero() protoreflect.Message { return &Message{typ: messageType{mt.desc}} } +func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc } +func (mt messageType) Enum(i int) protoreflect.EnumType { + if ed := mt.desc.Fields().Get(i).Enum(); ed != nil { + return NewEnumType(ed) + } + return nil +} +func (mt messageType) Message(i int) protoreflect.MessageType { + if md := mt.desc.Fields().Get(i).Message(); md != nil { + return NewMessageType(md) + } + return nil +} + +type emptyList struct { + desc protoreflect.FieldDescriptor +} + +func (x emptyList) Len() int { return 0 } +func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) } +func (x emptyList) Set(n int, v protoreflect.Value) { + panic(errors.New("modification of immutable list")) +} +func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) } +func (x emptyList) AppendMutable() protoreflect.Value { + panic(errors.New("modification of immutable list")) +} +func (x emptyList) Truncate(n int) { panic(errors.New("modification of immutable list")) } +func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) } +func (x emptyList) IsValid() bool { return false } + +type dynamicList struct { + desc protoreflect.FieldDescriptor + list []protoreflect.Value +} + +func (x *dynamicList) Len() int { + return len(x.list) +} + +func (x *dynamicList) Get(n int) protoreflect.Value { + return x.list[n] +} + +func (x *dynamicList) Set(n int, v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list[n] = v +} + +func (x *dynamicList) Append(v protoreflect.Value) { + typecheckSingular(x.desc, v) + x.list = append(x.list, v) +} + +func (x *dynamicList) AppendMutable() protoreflect.Value { + if x.desc.Message() == nil { + panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName())) + } + v := x.NewElement() + x.Append(v) + return v +} + +func (x *dynamicList) Truncate(n int) { + // Zero truncated elements to avoid keeping data live. 
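+	// For example, Truncate(0) keeps the backing array but clears every
+	// element, so previously stored values become eligible for collection.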
+ for i := n; i < len(x.list); i++ { + x.list[i] = protoreflect.Value{} + } + x.list = x.list[:n] +} + +func (x *dynamicList) NewElement() protoreflect.Value { + return newListEntry(x.desc) +} + +func (x *dynamicList) IsValid() bool { + return true +} + +type dynamicMap struct { + desc protoreflect.FieldDescriptor + mapv map[any]protoreflect.Value +} + +func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] } +func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) { + typecheckSingular(x.desc.MapKey(), k.Value()) + typecheckSingular(x.desc.MapValue(), v) + x.mapv[k.Interface()] = v +} +func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() } +func (x *dynamicMap) Clear(k protoreflect.MapKey) { delete(x.mapv, k.Interface()) } +func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value { + if x.desc.MapValue().Message() == nil { + panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName())) + } + v := x.Get(k) + if !v.IsValid() { + v = x.NewValue() + x.Set(k, v) + } + return v +} +func (x *dynamicMap) Len() int { return len(x.mapv) } +func (x *dynamicMap) NewValue() protoreflect.Value { + if md := x.desc.MapValue().Message(); md != nil { + return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect()) + } + return x.desc.MapValue().Default() +} +func (x *dynamicMap) IsValid() bool { + return x.mapv != nil +} + +func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { + for k, v := range x.mapv { + if !f(protoreflect.ValueOf(k).MapKey(), v) { + return + } + } +} + +func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsMap(): + return v.Map().Len() > 0 + case fd.IsList(): + return v.List().Len() > 0 + case fd.ContainingOneof() != nil: + return true + case !fd.HasPresence() && !fd.IsExtension(): + switch fd.Kind() { + case protoreflect.BoolKind: + return v.Bool() + case protoreflect.EnumKind: + return v.Enum() != 0 + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: + return v.Int() != 0 + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: + return v.Uint() != 0 + case protoreflect.FloatKind, protoreflect.DoubleKind: + return v.Float() != 0 || math.Signbit(v.Float()) + case protoreflect.StringKind: + return v.String() != "" + case protoreflect.BytesKind: + return len(v.Bytes()) > 0 + } + } + return true +} + +func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + if err := typeIsValid(fd, v); err != nil { + panic(err) + } +} + +func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + switch { + case !v.IsValid(): + return errors.New("%v: assigning invalid value", fd.FullName()) + case fd.IsMap(): + if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil + case fd.IsList(): + switch list := v.Interface().(type) { + case *dynamicList: + if list.desc == fd && list.IsValid() { + return nil + } + case emptyList: + if list.desc == fd && list.IsValid() { + return nil + } + } + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + default: + return singularTypeIsValid(fd, v) + } +} + +func typecheckSingular(fd protoreflect.FieldDescriptor, v 
protoreflect.Value) { + if err := singularTypeIsValid(fd, v); err != nil { + panic(err) + } +} + +func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error { + vi := v.Interface() + var ok bool + switch fd.Kind() { + case protoreflect.BoolKind: + _, ok = vi.(bool) + case protoreflect.EnumKind: + // We could check against the valid set of enum values, but do not. + _, ok = vi.(protoreflect.EnumNumber) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + _, ok = vi.(int32) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + _, ok = vi.(uint32) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + _, ok = vi.(int64) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + _, ok = vi.(uint64) + case protoreflect.FloatKind: + _, ok = vi.(float32) + case protoreflect.DoubleKind: + _, ok = vi.(float64) + case protoreflect.StringKind: + _, ok = vi.(string) + case protoreflect.BytesKind: + _, ok = vi.([]byte) + case protoreflect.MessageKind, protoreflect.GroupKind: + var m protoreflect.Message + m, ok = vi.(protoreflect.Message) + if ok && m.Descriptor().FullName() != fd.Message().FullName() { + return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName()) + } + if dm, ok := vi.(*Message); ok && dm.known == nil { + return errors.New("%v: assigning invalid zero-value message", fd.FullName()) + } + } + if !ok { + return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()) + } + return nil +} + +func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value { + switch fd.Kind() { + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.EnumKind: + return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.MessageKind, protoreflect.GroupKind: + return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect()) + } + panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind())) +} + +// NewExtensionType creates a new ExtensionType with the provided descriptor. +// +// Dynamic ExtensionTypes with the same descriptor compare as equal. That is, +// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2). +// +// The InterfaceOf and ValueOf methods of the extension type are defined as: +// +// func (xt extensionType) ValueOf(iv any) protoreflect.Value { +// return protoreflect.ValueOf(iv) +// } +// +// func (xt extensionType) InterfaceOf(v protoreflect.Value) any { +// return v.Interface() +// } +// +// The Go type used by the proto.GetExtension and proto.SetExtension functions +// is determined by these methods, and is therefore equivalent to the Go type +// used to represent a protoreflect.Value. 
See the protoreflect.Value +// documentation for more details. +func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType { + if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok { + desc = xt.Descriptor() + } + return extensionType{extensionTypeDescriptor{desc}} +} + +func (xt extensionType) New() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{ + desc: xt.desc, + mapv: make(map[any]protoreflect.Value), + }) + case xt.desc.IsList(): + return protoreflect.ValueOfList(&dynamicList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message())) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) Zero() protoreflect.Value { + switch { + case xt.desc.IsMap(): + return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc}) + case xt.desc.Cardinality() == protoreflect.Repeated: + return protoreflect.ValueOfList(emptyList{desc: xt.desc}) + case xt.desc.Message() != nil: + return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}}) + default: + return xt.desc.Default() + } +} + +func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { + return xt.desc +} + +func (xt extensionType) ValueOf(iv any) protoreflect.Value { + v := protoreflect.ValueOf(iv) + typecheck(xt.desc, v) + return v +} + +func (xt extensionType) InterfaceOf(v protoreflect.Value) any { + typecheck(xt.desc, v) + return v.Interface() +} + +func (xt extensionType) IsValidInterface(iv any) bool { + return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil +} + +func (xt extensionType) IsValidValue(v protoreflect.Value) bool { + return typeIsValid(xt.desc, v) == nil +} + +type extensionTypeDescriptor struct { + protoreflect.ExtensionDescriptor +} + +func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType { + return extensionType{xt} +} + +func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { + return xt.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go new file mode 100644 index 00000000..c432817b --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go @@ -0,0 +1,184 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dynamicpb + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +type extField struct { + name protoreflect.FullName + number protoreflect.FieldNumber +} + +// A Types is a collection of dynamically constructed descriptors. +// Its methods are safe for concurrent use. +// +// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver]. +// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver]. +type Types struct { + // atomicExtFiles is used with sync/atomic and hence must be the first word + // of the struct to guarantee 64-bit alignment. + // + // TODO(stapelberg): once we only support Go 1.19 and newer, switch this + // field to be of type atomic.Uint64 to guarantee alignment on + // stack-allocated values, too. 
+ atomicExtFiles uint64 + extMu sync.Mutex + + files *protoregistry.Files + + extensionsByMessage map[extField]protoreflect.ExtensionDescriptor +} + +// NewTypes creates a new Types registry with the provided files. +// The Files registry is retained, and changes to Files will be reflected in Types. +// It is not safe to concurrently change the Files while calling Types methods. +func NewTypes(f *protoregistry.Files) *Types { + return &Types{ + files: f, + } +} + +// FindEnumByName looks up an enum by its full name; +// e.g., "google.protobuf.Field.Kind". +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want enum", descName(d)) + } + return NewEnumType(ed), nil +} + +// FindExtensionByName looks up an extension field by the field's full name. +// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + xd, ok := d.(protoreflect.ExtensionDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want extension", descName(d)) + } + return NewExtensionType(xd), nil +} + +// FindExtensionByNumber looks up an extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + // Construct the extension number map lazily, since not every user will need it. + // Update the map if new files are added to the registry. + if atomic.LoadUint64(&t.atomicExtFiles) != uint64(t.files.NumFiles()) { + t.updateExtensions() + } + xd := t.extensionsByMessage[extField{message, field}] + if xd == nil { + return nil, protoregistry.NotFound + } + return NewExtensionType(xd), nil +} + +// FindMessageByName looks up a message by its full name; +// e.g. "google.protobuf.Any". +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) { + d, err := t.files.FindDescriptorByName(name) + if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("found wrong type: got %v, want message", descName(d)) + } + return NewMessageType(md), nil +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, [protoregistry.NotFound]) if not found. +func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + // This function is similar to FindMessageByName but + // truncates anything before and including '/' in the URL. 
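+	// For example, both of the following resolve to the same full name:
+	//	"type.googleapis.com/google.protobuf.Duration" -> "google.protobuf.Duration"
+	//	"google.protobuf.Duration"                     -> "google.protobuf.Duration"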
+ message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + return t.FindMessageByName(message) +} + +func (t *Types) updateExtensions() { + t.extMu.Lock() + defer t.extMu.Unlock() + if atomic.LoadUint64(&t.atomicExtFiles) == uint64(t.files.NumFiles()) { + return + } + defer atomic.StoreUint64(&t.atomicExtFiles, uint64(t.files.NumFiles())) + t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool { + t.registerExtensions(fd.Extensions()) + t.registerExtensionsInMessages(fd.Messages()) + return true + }) +} + +func (t *Types) registerExtensionsInMessages(mds protoreflect.MessageDescriptors) { + count := mds.Len() + for i := 0; i < count; i++ { + md := mds.Get(i) + t.registerExtensions(md.Extensions()) + t.registerExtensionsInMessages(md.Messages()) + } +} + +func (t *Types) registerExtensions(xds protoreflect.ExtensionDescriptors) { + count := xds.Len() + for i := 0; i < count; i++ { + xd := xds.Get(i) + field := xd.Number() + message := xd.ContainingMessage().FullName() + if t.extensionsByMessage == nil { + t.extensionsByMessage = make(map[extField]protoreflect.ExtensionDescriptor) + } + t.extensionsByMessage[extField{message, field}] = xd + } +} + +func descName(d protoreflect.Descriptor) string { + switch d.(type) { + case protoreflect.EnumDescriptor: + return "enum" + case protoreflect.EnumValueDescriptor: + return "enum value" + case protoreflect.MessageDescriptor: + return "message" + case protoreflect.ExtensionDescriptor: + return "extension" + case protoreflect.ServiceDescriptor: + return "service" + default: + return fmt.Sprintf("%T", d) + } +} diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index a2ca940c..c7e860fc 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -29,11 +29,9 @@ type GoFeatures struct { func (x *GoFeatures) Reset() { *x = GoFeatures{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_go_features_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_go_features_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GoFeatures) String() string { @@ -44,7 +42,7 @@ func (*GoFeatures) ProtoMessage() {} func (x *GoFeatures) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_go_features_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -145,20 +143,6 @@ func file_google_protobuf_go_features_proto_init() { if File_google_protobuf_go_features_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_go_features_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GoFeatures); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 7172b43d..87da199a 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ 
b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -368,11 +368,9 @@ func (x *Any) UnmarshalNew() (proto.Message, error) { func (x *Any) Reset() { *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Any) String() string { @@ -383,7 +381,7 @@ func (*Any) ProtoMessage() {} func (x *Any) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -461,20 +459,6 @@ func file_google_protobuf_any_proto_init() { if File_google_protobuf_any_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 1b71bcd9..b99d4d24 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -245,11 +245,9 @@ func (x *Duration) check() uint { func (x *Duration) Reset() { *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Duration) String() string { @@ -260,7 +258,7 @@ func (*Duration) ProtoMessage() {} func (x *Duration) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -339,20 +337,6 @@ func file_google_protobuf_duration_proto_init() { if File_google_protobuf_duration_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go new file mode 100644 index 00000000..1761bc9c --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -0,0 +1,150 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package emptypb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. 
+func (*Empty) Descriptor() ([]byte, []int) { + return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} +} + +var File_google_protobuf_empty_proto protoreflect.FileDescriptor + +var file_google_protobuf_empty_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, + 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_empty_proto_rawDescOnce sync.Once + file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc +) + +func file_google_protobuf_empty_proto_rawDescGZIP() []byte { + file_google_protobuf_empty_proto_rawDescOnce.Do(func() { + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + }) + return file_google_protobuf_empty_proto_rawDescData +} + +var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_empty_proto_goTypes = []any{ + (*Empty)(nil), // 0: google.protobuf.Empty +} +var file_google_protobuf_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_empty_proto_init() } +func file_google_protobuf_empty_proto_init() { + if File_google_protobuf_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_empty_proto_goTypes, + DependencyIndexes: file_google_protobuf_empty_proto_depIdxs, + MessageInfos: file_google_protobuf_empty_proto_msgTypes, + }.Build() + File_google_protobuf_empty_proto = out.File + file_google_protobuf_empty_proto_rawDesc = nil + file_google_protobuf_empty_proto_goTypes = nil + file_google_protobuf_empty_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go new file mode 100644 index 00000000..19de8d37 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -0,0 +1,572 @@ +// Protocol Buffers - Google's 
data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/field_mask.proto
+
+// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto.
+//
+// The FieldMask message represents a set of symbolic field paths.
+// The paths are specific to some target message type,
+// which is not stored within the FieldMask message itself.
+//
+// # Constructing a FieldMask
+//
+// The New function is used to construct a FieldMask:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "field.name", "field.number")
+//	if err != nil {
+//		... // handle error
+//	}
+//	... // make use of fm
+//
+// The "field.name" and "field.number" paths are valid paths according to the
+// google.protobuf.DescriptorProto message. Use of a path that does not correlate
+// to valid fields reachable from DescriptorProto would result in an error.
+//
+// Once a FieldMask message has been constructed,
+// the Append method can be used to insert additional paths to the path set:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if err := fm.Append(messageType, "options"); err != nil {
+//		... // handle error
+//	}
+//
+// # Type checking a FieldMask
+//
+// In order to verify that a FieldMask represents a set of fields that are
+// reachable from some target message type, use the IsValid method:
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	if fm.IsValid(messageType) {
+//		... // make use of fm
+//	}
+//
+// IsValid needs to be passed the target message type as an input since the
+// FieldMask message itself does not store the message type that the set of paths
+// are for.
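+//
+// Putting the pieces above together, a minimal sketch (reusing
+// descriptorpb.DescriptorProto purely as an example target type):
+//
+//	var messageType *descriptorpb.DescriptorProto
+//	fm, err := fieldmaskpb.New(messageType, "name", "field")
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = fm.Append(messageType, "options")
+//	fm.Normalize() // sort the paths and drop redundant ones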
+package fieldmaskpb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sort "sort" + strings "strings" + sync "sync" +) + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. 
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//	message Profile {
+//	  User user = 1;
+//	  Photo photo = 2;
+//	}
+//	message User {
+//	  string display_name = 1;
+//	  string address = 2;
+//	}
+//
+// In proto a field mask for `Profile` may look as such:
+//
+//	mask {
+//	  paths: "user.display_name"
+//	  paths: "photo"
+//	}
+//
+// In JSON, the same mask is represented as below:
+//
+//	{
+//	  mask: "user.displayName,photo"
+//	}
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//	message SampleMessage {
+//	  oneof test_oneof {
+//	    string name = 4;
+//	    SubMessage sub_message = 9;
+//	  }
+//	}
+//
+// The field mask can be:
+//
+//	mask {
+//	  paths: "name"
+//	}
+//
+// Or:
+//
+//	mask {
+//	  paths: "sub_message"
+//	}
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// ## Field Mask Verification
+//
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is unmappable.
+type FieldMask struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The set of field mask paths.
+	Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+}
+
+// New constructs a field mask from a list of paths and verifies that
+// each one is valid according to the specified message type.
+func New(m proto.Message, paths ...string) (*FieldMask, error) {
+	x := new(FieldMask)
+	return x, x.Append(m, paths...)
+}
+
+// Union returns the union of all the paths in the input field masks.
+func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var out []string
+	out = append(out, mx.GetPaths()...)
+	out = append(out, my.GetPaths()...)
+	for _, m := range ms {
+		out = append(out, m.GetPaths()...)
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// Intersect returns the intersection of all the paths in the input field masks.
+func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {
+	var ss1, ss2 []string // reused buffers for performance
+	intersect := func(out, in []string) []string {
+		ss1 = normalizePaths(append(ss1[:0], in...))
+		ss2 = normalizePaths(append(ss2[:0], out...))
+		out = out[:0]
+		for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {
+			switch s1, s2 := ss1[i1], ss2[i2]; {
+			case hasPathPrefix(s1, s2):
+				out = append(out, s1)
+				i1++
+			case hasPathPrefix(s2, s1):
+				out = append(out, s2)
+				i2++
+			case lessPath(s1, s2):
+				i1++
+			case lessPath(s2, s1):
+				i2++
+			}
+		}
+		return out
+	}
+
+	out := Union(mx, my, ms...).GetPaths()
+	out = intersect(out, mx.GetPaths())
+	out = intersect(out, my.GetPaths())
+	for _, m := range ms {
+		out = intersect(out, m.GetPaths())
+	}
+	return &FieldMask{Paths: normalizePaths(out)}
+}
+
+// IsValid reports whether all the paths are syntactically valid and
+// refer to known fields in the specified message type.
+// It reports false for a nil FieldMask.
+func (x *FieldMask) IsValid(m proto.Message) bool {
+	paths := x.GetPaths()
+	return x != nil && numValidPaths(m, paths) == len(paths)
+}
+
+// Append appends a list of paths to the mask and verifies that each one
+// is valid according to the specified message type.
+// An invalid path is not appended and breaks insertion of subsequent paths.
+func (x *FieldMask) Append(m proto.Message, paths ...string) error {
+	numValid := numValidPaths(m, paths)
+	x.Paths = append(x.Paths, paths[:numValid]...)
+	paths = paths[numValid:]
+	if len(paths) > 0 {
+		name := m.ProtoReflect().Descriptor().FullName()
+		return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name)
+	}
+	return nil
+}
+
+func numValidPaths(m proto.Message, paths []string) int {
+	md0 := m.ProtoReflect().Descriptor()
+	for i, path := range paths {
+		md := md0
+		if !rangeFields(path, func(field string) bool {
+			// Search the field within the message.
+			if md == nil {
+				return false // not within a message
+			}
+			fd := md.Fields().ByName(protoreflect.Name(field))
+			// The real field name of a group is the message name.
+			if fd == nil {
+				gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field)))
+				if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field {
+					fd = gd
+				}
+			} else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field {
+				fd = nil
+			}
+			if fd == nil {
+				return false // message does not have this field
+			}
+
+			// Identify the next message to search within.
+			md = fd.Message() // may be nil
+
+			// Repeated fields are only allowed at the last position.
+			if fd.IsList() || fd.IsMap() {
+				md = nil
+			}
+
+			return true
+		}) {
+			return i
+		}
+	}
+	return len(paths)
+}
+
+// Normalize converts the mask to its canonical form where all paths are sorted
+// and redundant paths are removed.
+func (x *FieldMask) Normalize() {
+	x.Paths = normalizePaths(x.Paths)
+}
+
+func normalizePaths(paths []string) []string {
+	sort.Slice(paths, func(i, j int) bool {
+		return lessPath(paths[i], paths[j])
+	})
+
+	// Elide any path that is a prefix match on the previous.
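+	// For example, ["a", "a.b", "ab"] normalizes to ["a", "ab"]: "a.b" is
+	// covered by the dot-delimited prefix "a", while "ab" is kept because a
+	// prefix match must end at a dot or at the end of the path.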
+	out := paths[:0]
+	for _, path := range paths {
+		if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {
+			continue
+		}
+		out = append(out, path)
+	}
+	return out
+}
+
+// hasPathPrefix is like strings.HasPrefix, but further checks for either
+// an exact match or that the prefix is delimited by a dot.
+func hasPathPrefix(path, prefix string) bool {
+	return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')
+}
+
+// lessPath is a lexicographical comparison where dot is specially treated
+// as the smallest symbol.
+func lessPath(x, y string) bool {
+	for i := 0; i < len(x) && i < len(y); i++ {
+		if x[i] != y[i] {
+			return (x[i] - '.') < (y[i] - '.')
+		}
+	}
+	return len(x) < len(y)
+}
+
+// rangeFields is like strings.Split(path, "."), but avoids allocations by
+// iterating over each field in place and calling an iterator function.
+func rangeFields(path string, f func(field string) bool) bool {
+	for {
+		var field string
+		if i := strings.IndexByte(path, '.'); i >= 0 {
+			field, path = path[:i], path[i:]
+		} else {
+			field, path = path, ""
+		}
+
+		if !f(field) {
+			return false
+		}
+
+		if len(path) == 0 {
+			return true
+		}
+		path = strings.TrimPrefix(path, ".")
+	}
+}
+
+func (x *FieldMask) Reset() {
+	*x = FieldMask{}
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FieldMask) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMask) ProtoMessage() {}
+
+func (x *FieldMask) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
+func (*FieldMask) Descriptor() ([]byte, []int) { + return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldMask) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor + +var file_google_protobuf_field_mask_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_field_mask_proto_rawDescOnce sync.Once + file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc +) + +func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { + file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + }) + return file_google_protobuf_field_mask_proto_rawDescData +} + +var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_field_mask_proto_goTypes = []any{ + (*FieldMask)(nil), // 0: google.protobuf.FieldMask +} +var file_google_protobuf_field_mask_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_field_mask_proto_init() } +func file_google_protobuf_field_mask_proto_init() { + if File_google_protobuf_field_mask_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_field_mask_proto_goTypes, + DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, + MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, + }.Build() + File_google_protobuf_field_mask_proto = out.File + 
file_google_protobuf_field_mask_proto_rawDesc = nil + file_google_protobuf_field_mask_proto_goTypes = nil + file_google_protobuf_field_mask_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go new file mode 100644 index 00000000..8f206a66 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -0,0 +1,782 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +// Package structpb contains generated types for google/protobuf/struct.proto. +// +// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are +// used to represent arbitrary JSON. The Value message represents a JSON value, +// the Struct message represents a JSON object, and the ListValue message +// represents a JSON array. See https://json.org for more information. +// +// The Value, Struct, and ListValue types have generated MarshalJSON and +// UnmarshalJSON methods such that they serialize JSON equivalent to what the +// messages themselves represent. Use of these types with the +// "google.golang.org/protobuf/encoding/protojson" package +// ensures that they will be serialized as their JSON equivalent. +// +// # Conversion to and from a Go interface +// +// The standard Go "encoding/json" package has functionality to serialize +// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and +// ListValue.AsSlice methods can convert the protobuf message representation into +// a form represented by any, map[string]any, and []any. +// This form can be used with other packages that operate on such data structures +// and also directly with the standard json package. 
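+//
+// For example (a minimal sketch; `s` stands for some previously constructed
+// *structpb.Struct rather than a name defined by this package):
+//
+//	m := s.AsMap()            // map[string]any
+//	b, err := json.Marshal(m) // standard "encoding/json" encoding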
+// +// In order to convert the any, map[string]any, and []any +// forms back as Value, Struct, and ListValue messages, use the NewStruct, +// NewList, and NewValue constructor functions. +// +// # Example usage +// +// Consider the following example JSON object: +// +// { +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": { +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100" +// }, +// "phoneNumbers": [ +// { +// "type": "home", +// "number": "212 555-1234" +// }, +// { +// "type": "office", +// "number": "646 555-4567" +// } +// ], +// "children": [], +// "spouse": null +// } +// +// To construct a Value message representing the above JSON object: +// +// m, err := structpb.NewValue(map[string]any{ +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": map[string]any{ +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100", +// }, +// "phoneNumbers": []any{ +// map[string]any{ +// "type": "home", +// "number": "212 555-1234", +// }, +// map[string]any{ +// "type": "office", +// "number": "646 555-4567", +// }, +// }, +// "children": []any{}, +// "spouse": nil, +// }) +// if err != nil { +// ... // handle error +// } +// ... // make use of m as a *structpb.Value +package structpb + +import ( + base64 "encoding/base64" + json "encoding/json" + protojson "google.golang.org/protobuf/encoding/protojson" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + utf8 "unicode/utf8" +) + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +// Enum value maps for NullValue. +var ( + NullValue_name = map[int32]string{ + 0: "NULL_VALUE", + } + NullValue_value = map[string]int32{ + "NULL_VALUE": 0, + } +) + +func (x NullValue) Enum() *NullValue { + p := new(NullValue) + *p = x + return p +} + +func (x NullValue) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NullValue) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_struct_proto_enumTypes[0].Descriptor() +} + +func (NullValue) Type() protoreflect.EnumType { + return &file_google_protobuf_struct_proto_enumTypes[0] +} + +func (x NullValue) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NullValue.Descriptor instead. +func (NullValue) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unordered map of dynamically typed values. 
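+	// Map keys must be valid UTF-8; NewStruct enforces this at construction.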
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// NewStruct constructs a Struct from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. +func NewStruct(v map[string]any) (*Struct, error) { + x := &Struct{Fields: make(map[string]*Value, len(v))} + for k, v := range v { + if !utf8.ValidString(k) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) + } + var err error + x.Fields[k], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsMap converts x to a general-purpose Go map. +// The map values are converted by calling Value.AsInterface. +func (x *Struct) AsMap() map[string]any { + f := x.GetFields() + vs := make(map[string]any, len(f)) + for k, v := range f { + vs[k] = v.AsInterface() + } + return vs +} + +func (x *Struct) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Struct) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Struct) Reset() { + *x = Struct{} + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Struct) ProtoMessage() {} + +func (x *Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Struct.ProtoReflect.Descriptor instead. +func (*Struct) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *Struct) GetFields() map[string]*Value { + if x != nil { + return x.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of these +// variants. Absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of value. + // + // Types that are assignable to Kind: + // + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +// NewValue constructs a Value from a general-purpose Go interface. 
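+// The conversion applied for each supported Go type is summarized below: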
+// +// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗ +// ║ Go type │ Conversion ║ +// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣ +// ║ nil │ stored as NullValue ║ +// ║ bool │ stored as BoolValue ║ +// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║ +// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║ +// ║ float32, float64 │ stored as NumberValue ║ +// ║ json.Number │ stored as NumberValue ║ +// ║ string │ stored as StringValue; must be valid UTF-8 ║ +// ║ []byte │ stored as StringValue; base64-encoded ║ +// ║ map[string]any │ stored as StructValue ║ +// ║ []any │ stored as ListValue ║ +// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝ +// +// When converting an int64 or uint64 to a NumberValue, numeric precision loss +// is possible since they are stored as a float64. +func NewValue(v any) (*Value, error) { + switch v := v.(type) { + case nil: + return NewNullValue(), nil + case bool: + return NewBoolValue(v), nil + case int: + return NewNumberValue(float64(v)), nil + case int8: + return NewNumberValue(float64(v)), nil + case int16: + return NewNumberValue(float64(v)), nil + case int32: + return NewNumberValue(float64(v)), nil + case int64: + return NewNumberValue(float64(v)), nil + case uint: + return NewNumberValue(float64(v)), nil + case uint8: + return NewNumberValue(float64(v)), nil + case uint16: + return NewNumberValue(float64(v)), nil + case uint32: + return NewNumberValue(float64(v)), nil + case uint64: + return NewNumberValue(float64(v)), nil + case float32: + return NewNumberValue(float64(v)), nil + case float64: + return NewNumberValue(float64(v)), nil + case json.Number: + n, err := v.Float64() + if err != nil { + return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err) + } + return NewNumberValue(n), nil + case string: + if !utf8.ValidString(v) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) + } + return NewStringValue(v), nil + case []byte: + s := base64.StdEncoding.EncodeToString(v) + return NewStringValue(s), nil + case map[string]any: + v2, err := NewStruct(v) + if err != nil { + return nil, err + } + return NewStructValue(v2), nil + case []any: + v2, err := NewList(v) + if err != nil { + return nil, err + } + return NewListValue(v2), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewNullValue constructs a new null Value. +func NewNullValue() *Value { + return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *Value { + return &Value{Kind: &Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *Value { + return &Value{Kind: &Value_NumberValue{NumberValue: v}} +} + +// NewStringValue constructs a new string Value. +func NewStringValue(v string) *Value { + return &Value{Kind: &Value_StringValue{StringValue: v}} +} + +// NewStructValue constructs a new struct Value. +func NewStructValue(v *Struct) *Value { + return &Value{Kind: &Value_StructValue{StructValue: v}} +} + +// NewListValue constructs a new list Value. +func NewListValue(v *ListValue) *Value { + return &Value{Kind: &Value_ListValue{ListValue: v}} +} + +// AsInterface converts x to a general-purpose Go interface. 
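+// The result is nil for a null (or unset) value; bool, float64, or string
+// for scalar kinds; map[string]any for a struct; and []any for a list.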
+// +// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce +// semantically equivalent JSON (assuming no errors occur). +// +// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are +// converted as strings to remain compatible with MarshalJSON. +func (x *Value) AsInterface() any { + switch v := x.GetKind().(type) { + case *Value_NumberValue: + if v != nil { + switch { + case math.IsNaN(v.NumberValue): + return "NaN" + case math.IsInf(v.NumberValue, +1): + return "Infinity" + case math.IsInf(v.NumberValue, -1): + return "-Infinity" + default: + return v.NumberValue + } + } + case *Value_StringValue: + if v != nil { + return v.StringValue + } + case *Value_BoolValue: + if v != nil { + return v.BoolValue + } + case *Value_StructValue: + if v != nil { + return v.StructValue.AsMap() + } + case *Value_ListValue: + if v != nil { + return v.ListValue.AsSlice() + } + } + return nil +} + +func (x *Value) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Value) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Value) Reset() { + *x = Value{} + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. +func (*Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (x *Value) GetNumberValue() float64 { + if x, ok := x.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetStructValue() *Struct { + if x, ok := x.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + // Represents a null value. + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + // Represents a double value. + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + // Represents a string value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + // Represents a boolean value. 
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + // Represents a structured value. + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + // Represents a repeated `Value`. + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +// NewList constructs a ListValue from a general-purpose Go slice. +// The slice elements are converted using NewValue. +func NewList(v []any) (*ListValue, error) { + x := &ListValue{Values: make([]*Value, len(v))} + for i, v := range v { + var err error + x.Values[i], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsSlice converts x to a general-purpose Go slice. +// The slice elements are converted by calling Value.AsInterface. +func (x *ListValue) AsSlice() []any { + vals := x.GetValues() + vs := make([]any, len(vals)) + for i, v := range vals { + vs[i] = v.AsInterface() + } + return vs +} + +func (x *ListValue) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *ListValue) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *ListValue) Reset() { + *x = ListValue{} + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +var File_google_protobuf_struct_proto protoreflect.FileDescriptor + +var file_google_protobuf_struct_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, + 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, + 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, + 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x12, 0x2e, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, + 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, + 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, + 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_protobuf_struct_proto_rawDescOnce sync.Once + file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc +) + +func file_google_protobuf_struct_proto_rawDescGZIP() []byte { + file_google_protobuf_struct_proto_rawDescOnce.Do(func() { + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + }) + return file_google_protobuf_struct_proto_rawDescData +} + +var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_struct_proto_goTypes = []any{ + (NullValue)(0), // 0: google.protobuf.NullValue + (*Struct)(nil), // 1: google.protobuf.Struct + (*Value)(nil), // 2: google.protobuf.Value + (*ListValue)(nil), // 3: google.protobuf.ListValue + nil, // 4: google.protobuf.Struct.FieldsEntry +} +var file_google_protobuf_struct_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry + 0, // 1: google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct + 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue + 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value + 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_protobuf_struct_proto_init() } +func file_google_protobuf_struct_proto_init() { + if File_google_protobuf_struct_proto != nil { + return + } + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + 
File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_struct_proto_goTypes, + DependencyIndexes: file_google_protobuf_struct_proto_depIdxs, + EnumInfos: file_google_protobuf_struct_proto_enumTypes, + MessageInfos: file_google_protobuf_struct_proto_msgTypes, + }.Build() + File_google_protobuf_struct_proto = out.File + file_google_protobuf_struct_proto_rawDesc = nil + file_google_protobuf_struct_proto_goTypes = nil + file_google_protobuf_struct_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 83a5a645..0d20722d 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -254,11 +254,9 @@ func (x *Timestamp) check() uint { func (x *Timestamp) Reset() { *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Timestamp) String() string { @@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {} func (x *Timestamp) ProtoReflect() protoreflect.Message { mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() { if File_google_protobuf_timestamp_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go new file mode 100644 index 00000000..006060e5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -0,0 +1,632 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. +// +// These wrappers have no meaningful use within repeated fields as they lack +// the ability to detect presence on individual elements. +// These wrappers have no meaningful use within a map or a oneof since +// individual entries of a map or fields of a oneof can already detect presence. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrapperspb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Double stores v in a new DoubleValue and returns a pointer to it. +func Double(v float64) *DoubleValue { + return &DoubleValue{Value: v} +} + +func (x *DoubleValue) Reset() { + *x = DoubleValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DoubleValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DoubleValue) ProtoMessage() {} + +func (x *DoubleValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead. +func (*DoubleValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0} +} + +func (x *DoubleValue) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Float stores v in a new FloatValue and returns a pointer to it. 
+func Float(v float32) *FloatValue { + return &FloatValue{Value: v} +} + +func (x *FloatValue) Reset() { + *x = FloatValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FloatValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatValue) ProtoMessage() {} + +func (x *FloatValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead. +func (*FloatValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1} +} + +func (x *FloatValue) GetValue() float32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int64 stores v in a new Int64Value and returns a pointer to it. +func Int64(v int64) *Int64Value { + return &Int64Value{Value: v} +} + +func (x *Int64Value) Reset() { + *x = Int64Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int64Value) ProtoMessage() {} + +func (x *Int64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead. +func (*Int64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2} +} + +func (x *Int64Value) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt64 stores v in a new UInt64Value and returns a pointer to it. +func UInt64(v uint64) *UInt64Value { + return &UInt64Value{Value: v} +} + +func (x *UInt64Value) Reset() { + *x = UInt64Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt64Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt64Value) ProtoMessage() {} + +func (x *UInt64Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead. 
+func (*UInt64Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3} +} + +func (x *UInt64Value) GetValue() uint64 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Int32 stores v in a new Int32Value and returns a pointer to it. +func Int32(v int32) *Int32Value { + return &Int32Value{Value: v} +} + +func (x *Int32Value) Reset() { + *x = Int32Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Int32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32Value) ProtoMessage() {} + +func (x *Int32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead. +func (*Int32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4} +} + +func (x *Int32Value) GetValue() int32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// UInt32 stores v in a new UInt32Value and returns a pointer to it. +func UInt32(v uint32) *UInt32Value { + return &UInt32Value{Value: v} +} + +func (x *UInt32Value) Reset() { + *x = UInt32Value{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UInt32Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UInt32Value) ProtoMessage() {} + +func (x *UInt32Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead. +func (*UInt32Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5} +} + +func (x *UInt32Value) GetValue() uint32 { + if x != nil { + return x.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bool stores v in a new BoolValue and returns a pointer to it. 
+func Bool(v bool) *BoolValue { + return &BoolValue{Value: v} +} + +func (x *BoolValue) Reset() { + *x = BoolValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BoolValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoolValue) ProtoMessage() {} + +func (x *BoolValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead. +func (*BoolValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6} +} + +func (x *BoolValue) GetValue() bool { + if x != nil { + return x.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// String stores v in a new StringValue and returns a pointer to it. +func String(v string) *StringValue { + return &StringValue{Value: v} +} + +func (x *StringValue) Reset() { + *x = StringValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StringValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringValue) ProtoMessage() {} + +func (x *StringValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringValue.ProtoReflect.Descriptor instead. +func (*StringValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7} +} + +func (x *StringValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +// Bytes stores v in a new BytesValue and returns a pointer to it. +func Bytes(v []byte) *BytesValue { + return &BytesValue{Value: v} +} + +func (x *BytesValue) Reset() { + *x = BytesValue{} + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BytesValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BytesValue) ProtoMessage() {} + +func (x *BytesValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_wrappers_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead. 
+func (*BytesValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8} +} + +func (x *BytesValue) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor + +var file_google_protobuf_wrappers_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, + 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, + 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33, + 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09, + 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, + 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + 
file_google_protobuf_wrappers_proto_rawDescOnce sync.Once + file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc +) + +func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { + file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + }) + return file_google_protobuf_wrappers_proto_rawDescData +} + +var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9) +var file_google_protobuf_wrappers_proto_goTypes = []any{ + (*DoubleValue)(nil), // 0: google.protobuf.DoubleValue + (*FloatValue)(nil), // 1: google.protobuf.FloatValue + (*Int64Value)(nil), // 2: google.protobuf.Int64Value + (*UInt64Value)(nil), // 3: google.protobuf.UInt64Value + (*Int32Value)(nil), // 4: google.protobuf.Int32Value + (*UInt32Value)(nil), // 5: google.protobuf.UInt32Value + (*BoolValue)(nil), // 6: google.protobuf.BoolValue + (*StringValue)(nil), // 7: google.protobuf.StringValue + (*BytesValue)(nil), // 8: google.protobuf.BytesValue +} +var file_google_protobuf_wrappers_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_wrappers_proto_init() } +func file_google_protobuf_wrappers_proto_init() { + if File_google_protobuf_wrappers_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + NumEnums: 0, + NumMessages: 9, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_wrappers_proto_goTypes, + DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs, + MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, + }.Build() + File_google_protobuf_wrappers_proto = out.File + file_google_protobuf_wrappers_proto_rawDesc = nil + file_google_protobuf_wrappers_proto_goTypes = nil + file_google_protobuf_wrappers_proto_depIdxs = nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go new file mode 100644 index 00000000..88db1ffa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=apiserver.k8s.io + +// Package apiserver is the internal version of the API. 
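+// Versioned configuration types (v1, v1alpha1, v1beta1) are converted to and
+// from these internal types; the sibling install package registers all of
+// them on a runtime.Scheme.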
+package apiserver // import "k8s.io/apiserver/pkg/apis/apiserver"
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go
new file mode 100644
index 00000000..8a8202cb
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/install/install.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package install
+
+import (
+    "k8s.io/apimachinery/pkg/runtime"
+    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+    "k8s.io/apiserver/pkg/apis/apiserver"
+    v1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
+    "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
+    "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
+)
+
+// Install registers the API group and adds types to a scheme
+func Install(scheme *runtime.Scheme) {
+    utilruntime.Must(apiserver.AddToScheme(scheme))
+
+    // v1alpha1 is in the k8s.io-suffixed API group
+    utilruntime.Must(v1alpha1.AddToScheme(scheme))
+    utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
+
+    // v1beta1 is also in the k8s.io-suffixed API group
+    utilruntime.Must(v1beta1.AddToScheme(scheme))
+    utilruntime.Must(scheme.SetVersionPriority(v1beta1.SchemeGroupVersion))
+
+    // v1 is in the config.k8s.io-suffixed API group
+    utilruntime.Must(v1.AddToScheme(scheme))
+    utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion))
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go
new file mode 100644
index 00000000..fd0b087c
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserver
+
+import (
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const LegacyGroupName = "apiserver.k8s.io"
+const GroupName = "apiserver.config.k8s.io"
+
+// LegacySchemeGroupVersion is the group version used to register these objects
+var LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: runtime.APIVersionInternal}
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+var (
+    SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+    AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
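Note (editor): install.Install above is the single entry point consumers are expected to use; it registers the internal types plus all three external versions and sets their version priority. A minimal sketch of wiring it into a decodable scheme, using only packages visible in this patch:

package main

import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"
    "k8s.io/apiserver/pkg/apis/apiserver/install"
)

func main() {
    scheme := runtime.NewScheme()
    install.Install(scheme)
    // A codec factory over this scheme can now decode AdmissionConfiguration,
    // EncryptionConfiguration, etc. from serialized config files.
    codecs := serializer.NewCodecFactory(scheme)
    _ = codecs
}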
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &AdmissionConfiguration{}, + &EgressSelectorConfiguration{}, + ) + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionConfiguration{}, + &AuthenticationConfiguration{}, + &AuthorizationConfiguration{}, + &EncryptionConfiguration{}, + &EgressSelectorConfiguration{}, + &TracingConfiguration{}, + ) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go new file mode 100644 index 00000000..af70fe24 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types.go @@ -0,0 +1,406 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + tracingapi "k8s.io/component-base/tracing/api/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionConfiguration provides versioned configuration for admission controllers. +type AdmissionConfiguration struct { + metav1.TypeMeta + + // Plugins allows specifying a configuration per admission control plugin. + // +optional + Plugins []AdmissionPluginConfiguration +} + +// AdmissionPluginConfiguration provides the configuration for a single plug-in. +type AdmissionPluginConfiguration struct { + // Name is the name of the admission controller. + // It must match the registered admission plugin name. + Name string + + // Path is the path to a configuration file that contains the plugin's + // configuration + // +optional + Path string + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +optional + Configuration *runtime.Unknown +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressSelectorConfiguration provides versioned configuration for egress selector clients. +type EgressSelectorConfiguration struct { + metav1.TypeMeta + + // EgressSelections contains a list of egress selection client configurations + EgressSelections []EgressSelection +} + +// EgressSelection provides the configuration for a single egress selection client. +type EgressSelection struct { + // Name is the name of the egress selection. + // Currently supported values are "controlplane", "etcd" and "cluster" + Name string + + // Connection is the exact information used to configure the egress selection + Connection Connection +} + +// Connection provides the configuration for a single egress selection client. +type Connection struct { + // Protocol is the protocol used to connect from client to the konnectivity server. + ProxyProtocol ProtocolType + + // Transport defines the transport configurations we use to dial to the konnectivity server. + // This is required if ProxyProtocol is HTTPConnect or GRPC. 
+ // +optional + Transport *Transport +} + +// ProtocolType is a set of valid values for Connection.ProtocolType +type ProtocolType string + +// Valid types for ProtocolType for konnectivity server +const ( + // Use HTTPConnect to connect to konnectivity server + ProtocolHTTPConnect ProtocolType = "HTTPConnect" + // Use grpc to connect to konnectivity server + ProtocolGRPC ProtocolType = "GRPC" + // Connect directly (skip konnectivity server) + ProtocolDirect ProtocolType = "Direct" +) + +// Transport defines the transport configurations we use to dial to the konnectivity server +type Transport struct { + // TCP is the TCP configuration for communicating with the konnectivity server via TCP + // ProxyProtocol of GRPC is not supported with TCP transport at the moment + // Requires at least one of TCP or UDS to be set + // +optional + TCP *TCPTransport + + // UDS is the UDS configuration for communicating with the konnectivity server via UDS + // Requires at least one of TCP or UDS to be set + // +optional + UDS *UDSTransport +} + +// TCPTransport provides the information to connect to konnectivity server via TCP +type TCPTransport struct { + // URL is the location of the konnectivity server to connect to. + // As an example it might be "https://127.0.0.1:8131" + URL string + + // TLSConfig is the config needed to use TLS when connecting to konnectivity server + // +optional + TLSConfig *TLSConfig +} + +// UDSTransport provides the information to connect to konnectivity server via UDS +type UDSTransport struct { + // UDSName is the name of the unix domain socket to connect to konnectivity server + // This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket) + UDSName string +} + +// TLSConfig provides the authentication information to connect to konnectivity server +// Only used with TCPTransport +type TLSConfig struct { + // caBundle is the file location of the CA to be used to determine trust with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // If absent while TCPTransport.URL is prefixed with https://, default to system trust roots. + // +optional + CABundle string + + // clientKey is the file location of the client key to authenticate with the konnectivity server + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientKey string + + // clientCert is the file location of the client certificate to authenticate with the konnectivity server + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // Must be configured if TCPTransport.URL is prefixed with https:// + // +optional + ClientCert string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TracingConfiguration provides versioned configuration for tracing clients. +type TracingConfiguration struct { + metav1.TypeMeta + + // Embed the component config tracing configuration struct + tracingapi.TracingConfiguration +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuthenticationConfiguration provides versioned configuration for authentication. +type AuthenticationConfiguration struct { + metav1.TypeMeta + + JWT []JWTAuthenticator + + // If present --anonymous-auth must not be set + Anonymous *AnonymousAuthConfig +} + +// AnonymousAuthConfig provides the configuration for the anonymous authenticator. 
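Note (editor): the Connection/Transport types above mirror the API server's egress selector config file; HTTPConnect and GRPC require a Transport, while Direct bypasses the konnectivity server. A construction sketch in which the selection name and socket path are illustrative, not from the patch:

package main

import (
    apiserver "k8s.io/apiserver/pkg/apis/apiserver"
)

func main() {
    cfg := &apiserver.EgressSelectorConfiguration{
        EgressSelections: []apiserver.EgressSelection{{
            Name: "cluster",
            Connection: apiserver.Connection{
                ProxyProtocol: apiserver.ProtocolGRPC,
                // Per the Transport comments above, GRPC is not supported with
                // TCP transport, so a unix domain socket is used here.
                Transport: &apiserver.Transport{
                    UDS: &apiserver.UDSTransport{
                        UDSName: "/etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket",
                    },
                },
            },
        }},
    }
    _ = cfg
}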
+type AnonymousAuthConfig struct { + Enabled bool + + // If set, anonymous auth is only allowed if the request meets one of the + // conditions. + Conditions []AnonymousAuthCondition +} + +// AnonymousAuthCondition describes the condition under which anonymous auth +// should be enabled. +type AnonymousAuthCondition struct { + // Path for which anonymous auth is enabled. + Path string +} + +// JWTAuthenticator provides the configuration for a single JWT authenticator. +type JWTAuthenticator struct { + Issuer Issuer + ClaimValidationRules []ClaimValidationRule + ClaimMappings ClaimMappings + UserValidationRules []UserValidationRule +} + +// Issuer provides the configuration for an external provider's specific settings. +type Issuer struct { + // url points to the issuer URL in a format https://url or https://url/path. + // This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + // Same value as the --oidc-issuer-url flag. + // Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + // Required to be unique across all JWT authenticators. + // Note that egress selection configuration is not used for this network connection. + // +required + URL string + // discoveryURL, if specified, overrides the URL used to fetch discovery + // information instead of using "{url}/.well-known/openid-configuration". + // The exact value specified is used, so "/.well-known/openid-configuration" + // must be included in discoveryURL if needed. + // + // The "issuer" field in the fetched discovery information must match the "issuer.url" field + // in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + // This is for scenarios where the well-known and jwks endpoints are hosted at a different + // location than the issuer (such as locally in the cluster). + // + // Example: + // A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + // and discovery information is available at '/.well-known/openid-configuration'. + // discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + // certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + // must be set to 'oidc.oidc-namespace'. + // + // curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + // { + // issuer: "https://oidc.example.com" (.url field) + // } + // + // discoveryURL must be different from url. + // Required to be unique across all JWT authenticators. + // Note that egress selection configuration is not used for this network connection. + // +optional + DiscoveryURL string + CertificateAuthority string + Audiences []string + AudienceMatchPolicy AudienceMatchPolicyType +} + +// AudienceMatchPolicyType is a set of valid values for Issuer.AudienceMatchPolicy +type AudienceMatchPolicyType string + +// Valid types for AudienceMatchPolicyType +const ( + AudienceMatchPolicyMatchAny AudienceMatchPolicyType = "MatchAny" +) + +// ClaimValidationRule provides the configuration for a single claim validation rule. 
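Note (editor): a minimal AuthenticationConfiguration built from the Issuer fields above; the issuer URL and audience are placeholders, and ClaimMappings (defined just below) is left at its zero value:

package main

import (
    apiserver "k8s.io/apiserver/pkg/apis/apiserver"
)

func main() {
    authn := &apiserver.AuthenticationConfiguration{
        JWT: []apiserver.JWTAuthenticator{{
            Issuer: apiserver.Issuer{
                URL:                 "https://oidc.example.com",
                Audiences:           []string{"my-cluster"},
                AudienceMatchPolicy: apiserver.AudienceMatchPolicyMatchAny,
            },
        }},
    }
    _ = authn
}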
+type ClaimValidationRule struct { + Claim string + RequiredValue string + + Expression string + Message string +} + +// ClaimMappings provides the configuration for claim mapping +type ClaimMappings struct { + Username PrefixedClaimOrExpression + Groups PrefixedClaimOrExpression + UID ClaimOrExpression + Extra []ExtraMapping +} + +// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression. +type PrefixedClaimOrExpression struct { + Claim string + Prefix *string + + Expression string +} + +// ClaimOrExpression provides the configuration for a single claim or expression. +type ClaimOrExpression struct { + Claim string + Expression string +} + +// ExtraMapping provides the configuration for a single extra mapping. +type ExtraMapping struct { + Key string + ValueExpression string +} + +// UserValidationRule provides the configuration for a single user validation rule. +type UserValidationRule struct { + Expression string + Message string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type AuthorizerType + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + Webhook *WebhookConfiguration +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. 
+ // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. + FailurePolicy string + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. + // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/types_encryption.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types_encryption.go new file mode 100644 index 00000000..fb663050 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/types_encryption.go @@ -0,0 +1,149 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
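Note (editor): pulling the authorization types above together, a sketch of a single-webhook configuration with one CEL match condition; every value here is illustrative rather than taken from the patch:

package main

import (
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    apiserver "k8s.io/apiserver/pkg/apis/apiserver"
)

func main() {
    authz := &apiserver.AuthorizationConfiguration{
        Authorizers: []apiserver.AuthorizerConfiguration{{
            Type: apiserver.TypeWebhook,
            Name: "example-webhook",
            Webhook: &apiserver.WebhookConfiguration{
                AuthorizedTTL:              metav1.Duration{Duration: 5 * time.Minute},
                UnauthorizedTTL:            metav1.Duration{Duration: 30 * time.Second},
                Timeout:                    metav1.Duration{Duration: 3 * time.Second},
                SubjectAccessReviewVersion: "v1",
                MatchConditionSubjectAccessReviewVersion: "v1",
                FailurePolicy: apiserver.FailurePolicyNoOpinion,
                ConnectionInfo: apiserver.WebhookConnectionInfo{
                    Type: apiserver.AuthorizationWebhookConnectionInfoTypeInCluster,
                },
                // Skip this webhook for requests made by service accounts.
                MatchConditions: []apiserver.WebhookMatchCondition{{
                    Expression: "!('system:serviceaccounts' in request.groups)",
                }},
            },
        }},
    }
    _ = authz
}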
+*/
+
+package apiserver
+
+import (
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+/*
+EncryptionConfiguration stores the complete configuration for encryption providers.
+It also allows the use of wildcards to specify the resources that should be encrypted.
+Use '*.<group>' to encrypt all resources within a group or '*.*' to encrypt all resources.
+'*.' can be used to encrypt all resources in the core group. '*.*' will encrypt all
+resources, even custom resources that are added after API server start.
+Use of wildcards that overlap within the same resource list or across multiple
+entries is not allowed since part of the configuration would be ineffective.
+Resource lists are processed in order, with earlier lists taking precedence.
+
+Example:
+
+    kind: EncryptionConfiguration
+    apiVersion: apiserver.config.k8s.io/v1
+    resources:
+    - resources:
+      - events
+      providers:
+      - identity: {}  # do not encrypt events even though *.* is specified below
+    - resources:
+      - secrets
+      - configmaps
+      - pandas.awesome.bears.example
+      providers:
+      - aescbc:
+          keys:
+          - name: key1
+            secret: c2VjcmV0IGlzIHNlY3VyZQ==
+    - resources:
+      - '*.apps'
+      providers:
+      - aescbc:
+          keys:
+          - name: key2
+            secret: c2VjcmV0IGlzIHNlY3VyZSwgb3IgaXMgaXQ/Cg==
+    - resources:
+      - '*.*'
+      providers:
+      - aescbc:
+          keys:
+          - name: key3
+            secret: c2VjcmV0IGlzIHNlY3VyZSwgSSB0aGluaw==
+*/
+type EncryptionConfiguration struct {
+    metav1.TypeMeta
+    // resources is a list containing resources, and their corresponding encryption providers.
+    Resources []ResourceConfiguration
+}
+
+// ResourceConfiguration stores per resource configuration.
+type ResourceConfiguration struct {
+    // resources is a list of kubernetes resources which have to be encrypted. The resource names are derived from `resource` or `resource.group` of the group/version/resource.
+    // eg: pandas.awesome.bears.example is a custom resource with 'group': awesome.bears.example, 'resource': pandas.
+    // Use '*.*' to encrypt all resources and '*.<group>' to encrypt all resources in a specific group.
+    // eg: '*.awesome.bears.example' will encrypt all resources in the group 'awesome.bears.example'.
+    // eg: '*.' will encrypt all resources in the core group (such as pods, configmaps, etc).
+    Resources []string
+    // providers is a list of transformers to be used for reading and writing the resources to disk.
+    // eg: aesgcm, aescbc, secretbox, identity, kms.
+    Providers []ProviderConfiguration
+}
+
+// ProviderConfiguration stores the provided configuration for an encryption provider.
+type ProviderConfiguration struct {
+    // aesgcm is the configuration for the AES-GCM transformer.
+    AESGCM *AESConfiguration
+    // aescbc is the configuration for the AES-CBC transformer.
+    AESCBC *AESConfiguration
+    // secretbox is the configuration for the Secretbox based transformer.
+    Secretbox *SecretboxConfiguration
+    // identity is the (empty) configuration for the identity transformer.
+    Identity *IdentityConfiguration
+    // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+    KMS *KMSConfiguration
+}
+
+// AESConfiguration contains the API configuration for an AES transformer.
+type AESConfiguration struct {
+    // keys is a list of keys to be used for creating the AES transformer.
+    // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM.
+    Keys []Key
+}
+
+// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
+type SecretboxConfiguration struct {
+    // keys is a list of keys to be used for creating the Secretbox transformer.
+    // Each key has to be 32 bytes long.
+    Keys []Key
+}
+
+// Key contains name and secret of the provided key for a transformer.
+type Key struct {
+    // name is the name of the key to be used while storing data to disk.
+    Name string
+    // secret is the actual key, encoded in base64.
+    Secret string
+}
+
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string {
+    return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name)
+}
+
+// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration.
+type IdentityConfiguration struct{}
+
+// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+type KMSConfiguration struct {
+    // apiVersion of KeyManagementService
+    // +optional
+    APIVersion string
+    // name is the name of the KMS plugin to be used.
+    Name string
+    // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000.
+    // Set to a negative value to disable caching. This field is only allowed for KMS v1 providers.
+    // +optional
+    CacheSize *int32
+    // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock".
+    Endpoint string
+    // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds.
+    // +optional
+    Timeout *metav1.Duration
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go
new file mode 100644
index 00000000..b71b53c6
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/defaults.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+    "time"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+)
+
+var (
+    defaultTimeout         = &metav1.Duration{Duration: 3 * time.Second}
+    defaultCacheSize int32 = 1000
+    defaultAPIVersion      = "v1"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+    return RegisterDefaults(scheme)
+}
+
+// SetDefaults_KMSConfiguration applies defaults to KMSConfiguration.
+func SetDefaults_KMSConfiguration(obj *KMSConfiguration) {
+    if obj.Timeout == nil {
+        obj.Timeout = defaultTimeout
+    }
+
+    if obj.APIVersion == "" {
+        obj.APIVersion = defaultAPIVersion
+    }
+
+    // cacheSize is relevant only for kms v1
+    if obj.CacheSize == nil && obj.APIVersion == "v1" {
+        obj.CacheSize = &defaultCacheSize
+    }
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go
new file mode 100644
index 00000000..91458aee
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2019 The Kubernetes Authors.
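Note (editor): SetDefaults_KMSConfiguration above is registered through addDefaultingFuncs and runs whenever a v1 object is defaulted by the scheme, but it can also be called directly. A minimal sketch (plugin name and endpoint are made up):

package main

import (
    "fmt"

    apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
)

func main() {
    kms := &apiserverv1.KMSConfiguration{Name: "kms-plugin", Endpoint: "unix:///tmp/kms.sock"}
    apiserverv1.SetDefaults_KMSConfiguration(kms)
    // Timeout defaults to 3s, APIVersion to "v1", and CacheSize to 1000
    // (the cache size default applies to KMS v1 only).
    fmt.Println(kms.Timeout.Duration, kms.APIVersion, *kms.CacheSize)
}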
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.config.k8s.io + +// Package v1 is the v1 version of the API. +package v1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go new file mode 100644 index 00000000..0de8db49 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) + localSchemeBuilder.Register(addDefaultingFuncs) +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionConfiguration{}, + &EncryptionConfiguration{}, + ) + // also register into the v1 group as EncryptionConfig (due to a docs bug) + scheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "EncryptionConfig"}, &EncryptionConfiguration{}) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go new file mode 100644 index 00000000..e139dceb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AdmissionConfiguration provides versioned configuration for admission controllers.
+type AdmissionConfiguration struct {
+    metav1.TypeMeta `json:",inline"`
+
+    // Plugins allows specifying a configuration per admission control plugin.
+    // +optional
+    Plugins []AdmissionPluginConfiguration `json:"plugins"`
+}
+
+// AdmissionPluginConfiguration provides the configuration for a single plug-in.
+type AdmissionPluginConfiguration struct {
+    // Name is the name of the admission controller.
+    // It must match the registered admission plugin name.
+    Name string `json:"name"`
+
+    // Path is the path to a configuration file that contains the plugin's
+    // configuration
+    // +optional
+    Path string `json:"path"`
+
+    // Configuration is an embedded configuration object to be used as the plugin's
+    // configuration. If present, it will be used instead of the path to the configuration file.
+    // +optional
+    Configuration *runtime.Unknown `json:"configuration"`
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types_encryption.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types_encryption.go
new file mode 100644
index 00000000..7aced8cf
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/types_encryption.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+    "fmt"
+
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+/*
+EncryptionConfiguration stores the complete configuration for encryption providers.
+It also allows the use of wildcards to specify the resources that should be encrypted.
+Use '*.<group>' to encrypt all resources within a group or '*.*' to encrypt all resources.
+'*.' can be used to encrypt all resources in the core group. '*.*' will encrypt all
+resources, even custom resources that are added after API server start.
+Use of wildcards that overlap within the same resource list or across multiple
+entries is not allowed since part of the configuration would be ineffective.
+Resource lists are processed in order, with earlier lists taking precedence.
+
+Example:
+
+    kind: EncryptionConfiguration
+    apiVersion: apiserver.config.k8s.io/v1
+    resources:
+    - resources:
+      - events
+      providers:
+      - identity: {}  # do not encrypt events even though *.* is specified below
+    - resources:
+      - secrets
+      - configmaps
+      - pandas.awesome.bears.example
+      providers:
+      - aescbc:
+          keys:
+          - name: key1
+            secret: c2VjcmV0IGlzIHNlY3VyZQ==
+    - resources:
+      - '*.apps'
+      providers:
+      - aescbc:
+          keys:
+          - name: key2
+            secret: c2VjcmV0IGlzIHNlY3VyZSwgb3IgaXMgaXQ/Cg==
+    - resources:
+      - '*.*'
+      providers:
+      - aescbc:
+          keys:
+          - name: key3
+            secret: c2VjcmV0IGlzIHNlY3VyZSwgSSB0aGluaw==
+*/
+type EncryptionConfiguration struct {
+    metav1.TypeMeta
+    // resources is a list containing resources, and their corresponding encryption providers.
+    Resources []ResourceConfiguration `json:"resources"`
+}
+
+// ResourceConfiguration stores per resource configuration.
+type ResourceConfiguration struct {
+    // resources is a list of kubernetes resources which have to be encrypted. The resource names are derived from `resource` or `resource.group` of the group/version/resource.
+    // eg: pandas.awesome.bears.example is a custom resource with 'group': awesome.bears.example, 'resource': pandas.
+    // Use '*.*' to encrypt all resources and '*.<group>' to encrypt all resources in a specific group.
+    // eg: '*.awesome.bears.example' will encrypt all resources in the group 'awesome.bears.example'.
+    // eg: '*.' will encrypt all resources in the core group (such as pods, configmaps, etc).
+    Resources []string `json:"resources"`
+    // providers is a list of transformers to be used for reading and writing the resources to disk.
+    // eg: aesgcm, aescbc, secretbox, identity, kms.
+    Providers []ProviderConfiguration `json:"providers"`
+}
+
+// ProviderConfiguration stores the provided configuration for an encryption provider.
+type ProviderConfiguration struct {
+    // aesgcm is the configuration for the AES-GCM transformer.
+    AESGCM *AESConfiguration `json:"aesgcm,omitempty"`
+    // aescbc is the configuration for the AES-CBC transformer.
+    AESCBC *AESConfiguration `json:"aescbc,omitempty"`
+    // secretbox is the configuration for the Secretbox based transformer.
+    Secretbox *SecretboxConfiguration `json:"secretbox,omitempty"`
+    // identity is the (empty) configuration for the identity transformer.
+    Identity *IdentityConfiguration `json:"identity,omitempty"`
+    // kms contains the name, cache size and path to configuration file for a KMS based envelope transformer.
+    KMS *KMSConfiguration `json:"kms,omitempty"`
+}
+
+// AESConfiguration contains the API configuration for an AES transformer.
+type AESConfiguration struct {
+    // keys is a list of keys to be used for creating the AES transformer.
+    // Each key has to be 32 bytes long for AES-CBC and 16, 24 or 32 bytes for AES-GCM.
+    Keys []Key `json:"keys"`
+}
+
+// SecretboxConfiguration contains the API configuration for a Secretbox transformer.
+type SecretboxConfiguration struct {
+    // keys is a list of keys to be used for creating the Secretbox transformer.
+    // Each key has to be 32 bytes long.
+    Keys []Key `json:"keys"`
+}
+
+// Key contains name and secret of the provided key for a transformer.
+type Key struct {
+    // name is the name of the key to be used while storing data to disk.
+    Name string `json:"name"`
+    // secret is the actual key, encoded in base64.
+    Secret string `json:"secret"`
+}
+
+// String implements Stringer interface in a log safe way.
+func (k Key) String() string { + return fmt.Sprintf("Name: %s, Secret: [REDACTED]", k.Name) +} + +// IdentityConfiguration is an empty struct to allow identity transformer in provider configuration. +type IdentityConfiguration struct{} + +// KMSConfiguration contains the name, cache size and path to configuration file for a KMS based envelope transformer. +type KMSConfiguration struct { + // apiVersion of KeyManagementService + // +optional + APIVersion string `json:"apiVersion"` + // name is the name of the KMS plugin to be used. + Name string `json:"name"` + // cachesize is the maximum number of secrets which are cached in memory. The default value is 1000. + // Set to a negative value to disable caching. This field is only allowed for KMS v1 providers. + // +optional + CacheSize *int32 `json:"cachesize,omitempty"` + // endpoint is the gRPC server listening address, for example "unix:///var/run/kms-provider.sock". + Endpoint string `json:"endpoint"` + // timeout for gRPC calls to kms-plugin (ex. 5s). The default is 3 seconds. + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go new file mode 100644 index 00000000..c0f21874 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.conversion.go @@ -0,0 +1,363 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
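Note (editor): RegisterConversions below is wired into the same localSchemeBuilder as the types, so building a scheme from the v1 package is enough to convert between the versioned and internal forms. A minimal round-trip sketch (field values are made up):

package main

import (
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    apiserver "k8s.io/apiserver/pkg/apis/apiserver"
    apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
)

func main() {
    scheme := runtime.NewScheme()
    utilruntime.Must(apiserver.AddToScheme(scheme))
    utilruntime.Must(apiserverv1.AddToScheme(scheme))

    in := &apiserverv1.KMSConfiguration{Name: "kms-plugin", Endpoint: "unix:///tmp/kms.sock"}
    out := &apiserver.KMSConfiguration{}
    // Convert dispatches to the generated functions registered by RegisterConversions.
    utilruntime.Must(scheme.Convert(in, out, nil))
}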
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AESConfiguration)(nil), (*apiserver.AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AESConfiguration_To_apiserver_AESConfiguration(a.(*AESConfiguration), b.(*apiserver.AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AESConfiguration)(nil), (*AESConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AESConfiguration_To_v1_AESConfiguration(a.(*apiserver.AESConfiguration), b.(*AESConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionConfiguration)(nil), (*apiserver.AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(a.(*AdmissionConfiguration), b.(*apiserver.AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionConfiguration)(nil), (*AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(a.(*apiserver.AdmissionConfiguration), b.(*AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionPluginConfiguration)(nil), (*apiserver.AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(a.(*AdmissionPluginConfiguration), b.(*apiserver.AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionPluginConfiguration)(nil), (*AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(a.(*apiserver.AdmissionPluginConfiguration), b.(*AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EncryptionConfiguration)(nil), (*apiserver.EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(a.(*EncryptionConfiguration), b.(*apiserver.EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.EncryptionConfiguration)(nil), (*EncryptionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(a.(*apiserver.EncryptionConfiguration), b.(*EncryptionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*IdentityConfiguration)(nil), (*apiserver.IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(a.(*IdentityConfiguration), b.(*apiserver.IdentityConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.IdentityConfiguration)(nil), (*IdentityConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(a.(*apiserver.IdentityConfiguration), b.(*IdentityConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*KMSConfiguration)(nil), (*apiserver.KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(a.(*KMSConfiguration), b.(*apiserver.KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.KMSConfiguration)(nil), (*KMSConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(a.(*apiserver.KMSConfiguration), b.(*KMSConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Key)(nil), (*apiserver.Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Key_To_apiserver_Key(a.(*Key), b.(*apiserver.Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Key)(nil), (*Key)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Key_To_v1_Key(a.(*apiserver.Key), b.(*Key), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ProviderConfiguration)(nil), (*apiserver.ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(a.(*ProviderConfiguration), b.(*apiserver.ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ProviderConfiguration)(nil), (*ProviderConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(a.(*apiserver.ProviderConfiguration), b.(*ProviderConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ResourceConfiguration)(nil), (*apiserver.ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(a.(*ResourceConfiguration), b.(*apiserver.ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ResourceConfiguration)(nil), (*ResourceConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(a.(*apiserver.ResourceConfiguration), b.(*ResourceConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*SecretboxConfiguration)(nil), (*apiserver.SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(a.(*SecretboxConfiguration), b.(*apiserver.SecretboxConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.SecretboxConfiguration)(nil), (*SecretboxConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(a.(*apiserver.SecretboxConfiguration), b.(*SecretboxConfiguration), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_AESConfiguration_To_apiserver_AESConfiguration(in 
*AESConfiguration, out *apiserver.AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]apiserver.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_AESConfiguration_To_apiserver_AESConfiguration is an autogenerated conversion function. +func Convert_v1_AESConfiguration_To_apiserver_AESConfiguration(in *AESConfiguration, out *apiserver.AESConfiguration, s conversion.Scope) error { + return autoConvert_v1_AESConfiguration_To_apiserver_AESConfiguration(in, out, s) +} + +func autoConvert_apiserver_AESConfiguration_To_v1_AESConfiguration(in *apiserver.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_apiserver_AESConfiguration_To_v1_AESConfiguration is an autogenerated conversion function. +func Convert_apiserver_AESConfiguration_To_v1_AESConfiguration(in *apiserver.AESConfiguration, out *AESConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AESConfiguration_To_v1_AESConfiguration(in, out, s) +} + +func autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function. +func Convert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_v1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins)) + return nil +} + +// Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionConfiguration_To_v1_AdmissionConfiguration(in, out, s) +} + +func autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function. 
+func Convert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_v1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s) +} + +func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + out.Name = in.Name + out.Path = in.Path + out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration)) + return nil +} + +// Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration is an autogenerated conversion function. +func Convert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1_AdmissionPluginConfiguration(in, out, s) +} + +func autoConvert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in *EncryptionConfiguration, out *apiserver.EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]apiserver.ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration is an autogenerated conversion function. +func Convert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in *EncryptionConfiguration, out *apiserver.EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_v1_EncryptionConfiguration_To_apiserver_EncryptionConfiguration(in, out, s) +} + +func autoConvert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *apiserver.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + out.Resources = *(*[]ResourceConfiguration)(unsafe.Pointer(&in.Resources)) + return nil +} + +// Convert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration is an autogenerated conversion function. +func Convert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(in *apiserver.EncryptionConfiguration, out *EncryptionConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_EncryptionConfiguration_To_v1_EncryptionConfiguration(in, out, s) +} + +func autoConvert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(in *IdentityConfiguration, out *apiserver.IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration is an autogenerated conversion function. +func Convert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(in *IdentityConfiguration, out *apiserver.IdentityConfiguration, s conversion.Scope) error { + return autoConvert_v1_IdentityConfiguration_To_apiserver_IdentityConfiguration(in, out, s) +} + +func autoConvert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(in *apiserver.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return nil +} + +// Convert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration is an autogenerated conversion function. 
+func Convert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(in *apiserver.IdentityConfiguration, out *IdentityConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_IdentityConfiguration_To_v1_IdentityConfiguration(in, out, s) +} + +func autoConvert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(in *KMSConfiguration, out *apiserver.KMSConfiguration, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_v1_KMSConfiguration_To_apiserver_KMSConfiguration is an autogenerated conversion function. +func Convert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(in *KMSConfiguration, out *apiserver.KMSConfiguration, s conversion.Scope) error { + return autoConvert_v1_KMSConfiguration_To_apiserver_KMSConfiguration(in, out, s) +} + +func autoConvert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(in *apiserver.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.Name = in.Name + out.CacheSize = (*int32)(unsafe.Pointer(in.CacheSize)) + out.Endpoint = in.Endpoint + out.Timeout = (*metav1.Duration)(unsafe.Pointer(in.Timeout)) + return nil +} + +// Convert_apiserver_KMSConfiguration_To_v1_KMSConfiguration is an autogenerated conversion function. +func Convert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(in *apiserver.KMSConfiguration, out *KMSConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_KMSConfiguration_To_v1_KMSConfiguration(in, out, s) +} + +func autoConvert_v1_Key_To_apiserver_Key(in *Key, out *apiserver.Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_v1_Key_To_apiserver_Key is an autogenerated conversion function. +func Convert_v1_Key_To_apiserver_Key(in *Key, out *apiserver.Key, s conversion.Scope) error { + return autoConvert_v1_Key_To_apiserver_Key(in, out, s) +} + +func autoConvert_apiserver_Key_To_v1_Key(in *apiserver.Key, out *Key, s conversion.Scope) error { + out.Name = in.Name + out.Secret = in.Secret + return nil +} + +// Convert_apiserver_Key_To_v1_Key is an autogenerated conversion function. +func Convert_apiserver_Key_To_v1_Key(in *apiserver.Key, out *Key, s conversion.Scope) error { + return autoConvert_apiserver_Key_To_v1_Key(in, out, s) +} + +func autoConvert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(in *ProviderConfiguration, out *apiserver.ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*apiserver.AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*apiserver.AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*apiserver.SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*apiserver.IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*apiserver.KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration is an autogenerated conversion function. 
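Note (editor): the generated conversions above reinterpret slices and pointers directly with unsafe.Pointer instead of copying element by element; conversion-gen only emits this when the two struct layouts are memory-identical. A stripped-down illustration of the pattern, with stand-in types that are not from the patch:

package main

import (
    "fmt"
    "unsafe"
)

type internalKey struct{ Name, Secret string } // same memory layout...
type v1Key struct{ Name, Secret string }       // ...as this versioned type

func main() {
    in := []v1Key{{Name: "key1", Secret: "c2VjcmV0"}}
    // Reinterpret the backing array in place: no allocation, no copy.
    out := *(*[]internalKey)(unsafe.Pointer(&in))
    fmt.Println(out[0].Name) // "key1"
}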
+func Convert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(in *ProviderConfiguration, out *apiserver.ProviderConfiguration, s conversion.Scope) error { + return autoConvert_v1_ProviderConfiguration_To_apiserver_ProviderConfiguration(in, out, s) +} + +func autoConvert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(in *apiserver.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + out.AESGCM = (*AESConfiguration)(unsafe.Pointer(in.AESGCM)) + out.AESCBC = (*AESConfiguration)(unsafe.Pointer(in.AESCBC)) + out.Secretbox = (*SecretboxConfiguration)(unsafe.Pointer(in.Secretbox)) + out.Identity = (*IdentityConfiguration)(unsafe.Pointer(in.Identity)) + out.KMS = (*KMSConfiguration)(unsafe.Pointer(in.KMS)) + return nil +} + +// Convert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration is an autogenerated conversion function. +func Convert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(in *apiserver.ProviderConfiguration, out *ProviderConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_ProviderConfiguration_To_v1_ProviderConfiguration(in, out, s) +} + +func autoConvert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(in *ResourceConfiguration, out *apiserver.ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]apiserver.ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration is an autogenerated conversion function. +func Convert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(in *ResourceConfiguration, out *apiserver.ResourceConfiguration, s conversion.Scope) error { + return autoConvert_v1_ResourceConfiguration_To_apiserver_ResourceConfiguration(in, out, s) +} + +func autoConvert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(in *apiserver.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.Providers = *(*[]ProviderConfiguration)(unsafe.Pointer(&in.Providers)) + return nil +} + +// Convert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration is an autogenerated conversion function. +func Convert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(in *apiserver.ResourceConfiguration, out *ResourceConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_ResourceConfiguration_To_v1_ResourceConfiguration(in, out, s) +} + +func autoConvert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(in *SecretboxConfiguration, out *apiserver.SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]apiserver.Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration is an autogenerated conversion function. 
+func Convert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(in *SecretboxConfiguration, out *apiserver.SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_v1_SecretboxConfiguration_To_apiserver_SecretboxConfiguration(in, out, s) +} + +func autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *apiserver.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + out.Keys = *(*[]Key)(unsafe.Pointer(&in.Keys)) + return nil +} + +// Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration is an autogenerated conversion function. +func Convert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in *apiserver.SecretboxConfiguration, out *SecretboxConfiguration, s conversion.Scope) error { + return autoConvert_apiserver_SecretboxConfiguration_To_v1_SecretboxConfiguration(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..cbdcaa5a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.deepcopy.go @@ -0,0 +1,281 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]AdmissionPluginConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration. +func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration { + if in == nil { + return nil + } + out := new(AdmissionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration. +func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration { + if in == nil { + return nil + } + out := new(AdmissionPluginConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration. +func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration { + if in == nil { + return nil + } + out := new(EncryptionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration. +func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration { + if in == nil { + return nil + } + out := new(IdentityConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) { + *out = *in + if in.CacheSize != nil { + in, out := &in.CacheSize, &out.CacheSize + *out = new(int32) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration. +func (in *KMSConfiguration) DeepCopy() *KMSConfiguration { + if in == nil { + return nil + } + out := new(KMSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Key) DeepCopyInto(out *Key) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key. +func (in *Key) DeepCopy() *Key { + if in == nil { + return nil + } + out := new(Key) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) { + *out = *in + if in.AESGCM != nil { + in, out := &in.AESGCM, &out.AESGCM + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.AESCBC != nil { + in, out := &in.AESCBC, &out.AESCBC + *out = new(AESConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Secretbox != nil { + in, out := &in.Secretbox, &out.Secretbox + *out = new(SecretboxConfiguration) + (*in).DeepCopyInto(*out) + } + if in.Identity != nil { + in, out := &in.Identity, &out.Identity + *out = new(IdentityConfiguration) + **out = **in + } + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration. +func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration { + if in == nil { + return nil + } + out := new(ProviderConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Providers != nil { + in, out := &in.Providers, &out.Providers + *out = make([]ProviderConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration. +func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration { + if in == nil { + return nil + } + out := new(ResourceConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration. +func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration { + if in == nil { + return nil + } + out := new(SecretboxConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go new file mode 100644 index 00000000..82fec011 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1/zz_generated.defaults.go @@ -0,0 +1,46 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. 
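The deepcopy helpers above follow the standard deepcopy-gen contract: DeepCopyInto writes into caller-owned memory, DeepCopy allocates a fresh value, and DeepCopyObject satisfies runtime.Object so these configurations can be handled generically by the scheme. A minimal sketch of the aliasing guarantee they provide (variable names and values are illustrative, not part of the vendored code):

package main

import (
	apiserverv1 "k8s.io/apiserver/pkg/apis/apiserver/v1"
)

func main() {
	size := int32(1000)
	orig := &apiserverv1.KMSConfiguration{Name: "kms-plugin", CacheSize: &size}

	// DeepCopy re-allocates pointer fields such as CacheSize and Timeout,
	// so mutating the copy never reaches back into the original.
	cp := orig.DeepCopy()
	*cp.CacheSize = 2000

	_ = orig // orig.CacheSize is still 1000
}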
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&EncryptionConfiguration{}, func(obj interface{}) { SetObjectDefaults_EncryptionConfiguration(obj.(*EncryptionConfiguration)) }) + return nil +} + +func SetObjectDefaults_EncryptionConfiguration(in *EncryptionConfiguration) { + for i := range in.Resources { + a := &in.Resources[i] + for j := range a.Providers { + b := &a.Providers[j] + if b.KMS != nil { + SetDefaults_KMSConfiguration(b.KMS) + } + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go new file mode 100644 index 00000000..31b8b706 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/conversion.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + if err := autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in, out, s); err != nil { + return err + } + if out.Name == "master" { + out.Name = "controlplane" + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go new file mode 100644 index 00000000..a9af01fe --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/defaults.go @@ -0,0 +1,36 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
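SetObjectDefaults_EncryptionConfiguration above walks every resource rule and applies KMS defaulting to each provider that sets a KMS entry (SetDefaults_KMSConfiguration itself lives elsewhere in this package). A sketch of driving it directly, with illustrative values:

package v1

func exampleEncryptionDefaults() {
	ec := &EncryptionConfiguration{
		Resources: []ResourceConfiguration{{
			Resources: []string{"secrets"},
			Providers: []ProviderConfiguration{{
				KMS: &KMSConfiguration{Name: "kms-plugin"},
			}},
		}},
	}
	// Fills KMS defaults on every provider in every rule;
	// non-KMS providers are left untouched.
	SetObjectDefaults_EncryptionConfiguration(ec)
}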
+*/ + +package v1alpha1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) { + if obj.AuthorizedTTL.Duration == 0 { + obj.AuthorizedTTL.Duration = 5 * time.Minute + } + if obj.UnauthorizedTTL.Duration == 0 { + obj.UnauthorizedTTL.Duration = 30 * time.Second + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go new file mode 100644 index 00000000..c9b658b2 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.k8s.io +// +groupName=apiserver.config.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go new file mode 100644 index 00000000..7d68ac0c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/register.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiserver.k8s.io" +const ConfigGroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// ConfigSchemeGroupVersion is group version used to register these objects +var ConfigSchemeGroupVersion = schema.GroupVersion{Group: ConfigGroupName, Version: "v1alpha1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. 
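SetDefaults_WebhookConfiguration only fills a TTL whose Duration is exactly zero, so explicitly configured values survive defaulting. A small sketch of the observable behavior (the helper name is illustrative):

package v1alpha1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleWebhookDefaults() {
	w := &WebhookConfiguration{
		// AuthorizedTTL left at its zero value; UnauthorizedTTL set explicitly.
		UnauthorizedTTL: metav1.Duration{Duration: time.Minute},
	}
	SetDefaults_WebhookConfiguration(w)
	// Now: w.AuthorizedTTL.Duration == 5*time.Minute (defaulted)
	//      w.UnauthorizedTTL.Duration == time.Minute (preserved)
}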
The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AdmissionConfiguration{}, + &EgressSelectorConfiguration{}, + ) + scheme.AddKnownTypes(ConfigSchemeGroupVersion, + &AuthenticationConfiguration{}, + &AuthorizationConfiguration{}, + &TracingConfiguration{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + metav1.AddToGroupVersion(scheme, ConfigSchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go new file mode 100644 index 00000000..214ef4e4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/types.go @@ -0,0 +1,620 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + tracingapi "k8s.io/component-base/tracing/api/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AdmissionConfiguration provides versioned configuration for admission controllers. +type AdmissionConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // Plugins allows specifying a configuration per admission control plugin. + // +optional + Plugins []AdmissionPluginConfiguration `json:"plugins"` +} + +// AdmissionPluginConfiguration provides the configuration for a single plug-in. +type AdmissionPluginConfiguration struct { + // Name is the name of the admission controller. + // It must match the registered admission plugin name. + Name string `json:"name"` + + // Path is the path to a configuration file that contains the plugin's + // configuration + // +optional + Path string `json:"path"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +optional + Configuration *runtime.Unknown `json:"configuration"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressSelectorConfiguration provides versioned configuration for egress selector clients. +type EgressSelectorConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // connectionServices contains a list of egress selection client configurations + EgressSelections []EgressSelection `json:"egressSelections"` +} + +// EgressSelection provides the configuration for a single egress selection client. +type EgressSelection struct { + // name is the name of the egress selection. 
+ // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" + Name string `json:"name"` + + // connection is the exact information used to configure the egress selection + Connection Connection `json:"connection"` +} + +// Connection provides the configuration for a single egress selection client. +type Connection struct { + // Protocol is the protocol used to connect from client to the konnectivity server. + ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"` + + // Transport defines the transport configurations we use to dial to the konnectivity server. + // This is required if ProxyProtocol is HTTPConnect or GRPC. + // +optional + Transport *Transport `json:"transport,omitempty"` +} + +// ProtocolType is a set of valid values for Connection.ProtocolType +type ProtocolType string + +// Valid types for ProtocolType for konnectivity server +const ( + // Use HTTPConnect to connect to konnectivity server + ProtocolHTTPConnect ProtocolType = "HTTPConnect" + // Use grpc to connect to konnectivity server + ProtocolGRPC ProtocolType = "GRPC" + // Connect directly (skip konnectivity server) + ProtocolDirect ProtocolType = "Direct" +) + +// Transport defines the transport configurations we use to dial to the konnectivity server +type Transport struct { + // TCP is the TCP configuration for communicating with the konnectivity server via TCP + // ProxyProtocol of GRPC is not supported with TCP transport at the moment + // Requires at least one of TCP or UDS to be set + // +optional + TCP *TCPTransport `json:"tcp,omitempty"` + + // UDS is the UDS configuration for communicating with the konnectivity server via UDS + // Requires at least one of TCP or UDS to be set + // +optional + UDS *UDSTransport `json:"uds,omitempty"` +} + +// TCPTransport provides the information to connect to konnectivity server via TCP +type TCPTransport struct { + // URL is the location of the konnectivity server to connect to. + // As an example it might be "https://127.0.0.1:8131" + URL string `json:"url,omitempty"` + + // TLSConfig is the config needed to use TLS when connecting to konnectivity server + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` +} + +// UDSTransport provides the information to connect to konnectivity server via UDS +type UDSTransport struct { + // UDSName is the name of the unix domain socket to connect to konnectivity server + // This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket) + UDSName string `json:"udsName,omitempty"` +} + +// TLSConfig provides the authentication information to connect to konnectivity server +// Only used with TCPTransport +type TLSConfig struct { + // caBundle is the file location of the CA to be used to determine trust with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // If absent while TCPTransport.URL is prefixed with https://, default to system trust roots. + // +optional + CABundle string `json:"caBundle,omitempty"` + + // clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server. 
+ // Must be absent/empty if TCPTransport.URL is prefixed with http://
+ // Must be configured if TCPTransport.URL is prefixed with https://
+ // +optional
+ ClientKey string `json:"clientKey,omitempty"`
+
+ // clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server.
+ // Must be absent/empty if TCPTransport.URL is prefixed with http://
+ // Must be configured if TCPTransport.URL is prefixed with https://
+ // +optional
+ ClientCert string `json:"clientCert,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TracingConfiguration provides versioned configuration for tracing clients.
+type TracingConfiguration struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Embed the component config tracing configuration struct
+ tracingapi.TracingConfiguration `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticationConfiguration provides versioned configuration for authentication.
+type AuthenticationConfiguration struct {
+ metav1.TypeMeta
+
+ // jwt is a list of authenticator to authenticate Kubernetes users using
+ // JWT compliant tokens. The authenticator will attempt to parse a raw ID token,
+ // verify it's been signed by the configured issuer. The public key to verify the
+ // signature is discovered from the issuer's public endpoint using OIDC discovery.
+ // For an incoming token, each JWT authenticator will be attempted in
+ // the order in which it is specified in this list. Note however that
+ // other authenticators may run before or after the JWT authenticators.
+ // The specific position of JWT authenticators in relation to other
+ // authenticators is neither defined nor stable across releases. Since
+ // each JWT authenticator must have a unique issuer URL, at most one
+ // JWT authenticator will attempt to cryptographically validate the token.
+ //
+ // The minimum valid JWT payload must contain the following claims:
+ // {
+ // "iss": "https://issuer.example.com",
+ // "aud": ["audience"],
+ // "exp": 1234567890,
+ // "<username-claim>": "username"
+ // }
+ JWT []JWTAuthenticator `json:"jwt"`
+
+ // If present --anonymous-auth must not be set
+ Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"`
+}
+
+// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
+type AnonymousAuthConfig struct {
+ Enabled bool `json:"enabled"`
+
+ // If set, anonymous auth is only allowed if the request meets one of the
+ // conditions.
+ Conditions []AnonymousAuthCondition `json:"conditions,omitempty"`
+}
+
+// AnonymousAuthCondition describes the condition under which anonymous auth
+// should be enabled.
+type AnonymousAuthCondition struct {
+ // Path for which anonymous auth is enabled.
+ Path string `json:"path"`
+}
+
+// JWTAuthenticator provides the configuration for a single JWT authenticator.
+type JWTAuthenticator struct {
+ // issuer contains the basic OIDC provider connection options.
+ // +required
+ Issuer Issuer `json:"issuer"`
+
+ // claimValidationRules are rules that are applied to validate token claims to authenticate users.
+ // +optional
+ ClaimValidationRules []ClaimValidationRule `json:"claimValidationRules,omitempty"`
+
+ // claimMappings points claims of a token to be treated as user attributes.
+ // +required
+ ClaimMappings ClaimMappings `json:"claimMappings"`
+
+ // userValidationRules are rules that are applied to final user before completing authentication.
+ // These allow invariants to be applied to incoming identities such as preventing the + // use of the system: prefix that is commonly used by Kubernetes components. + // The validation rules are logically ANDed together and must all return true for the validation to pass. + // +optional + UserValidationRules []UserValidationRule `json:"userValidationRules,omitempty"` +} + +// Issuer provides the configuration for an external provider's specific settings. +type Issuer struct { + // url points to the issuer URL in a format https://url or https://url/path. + // This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + // Same value as the --oidc-issuer-url flag. + // Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + // Required to be unique across all JWT authenticators. + // Note that egress selection configuration is not used for this network connection. + // +required + URL string `json:"url"` + + // discoveryURL, if specified, overrides the URL used to fetch discovery + // information instead of using "{url}/.well-known/openid-configuration". + // The exact value specified is used, so "/.well-known/openid-configuration" + // must be included in discoveryURL if needed. + // + // The "issuer" field in the fetched discovery information must match the "issuer.url" field + // in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + // This is for scenarios where the well-known and jwks endpoints are hosted at a different + // location than the issuer (such as locally in the cluster). + // + // Example: + // A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + // and discovery information is available at '/.well-known/openid-configuration'. + // discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + // certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + // must be set to 'oidc.oidc-namespace'. + // + // curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + // { + // issuer: "https://oidc.example.com" (.url field) + // } + // + // discoveryURL must be different from url. + // Required to be unique across all JWT authenticators. + // Note that egress selection configuration is not used for this network connection. + // +optional + DiscoveryURL *string `json:"discoveryURL,omitempty"` + + // certificateAuthority contains PEM-encoded certificate authority certificates + // used to validate the connection when fetching discovery information. + // If unset, the system verifier is used. + // Same value as the content of the file referenced by the --oidc-ca-file flag. + // +optional + CertificateAuthority string `json:"certificateAuthority,omitempty"` + + // audiences is the set of acceptable audiences the JWT must be issued to. + // At least one of the entries must match the "aud" claim in presented JWTs. + // Same value as the --oidc-client-id flag (though this field supports an array). + // Required to be non-empty. + // +required + Audiences []string `json:"audiences"` + + // audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + // Allowed values are: + // 1. "MatchAny" when multiple audiences are specified and + // 2. empty (or unset) or "MatchAny" when a single audience is specified. 
+ // + // - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + // For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + // + // - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + // + // For more nuanced audience validation, use claimValidationRules. + // example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. + // +optional + AudienceMatchPolicy AudienceMatchPolicyType `json:"audienceMatchPolicy,omitempty"` +} + +// AudienceMatchPolicyType is a set of valid values for issuer.audienceMatchPolicy +type AudienceMatchPolicyType string + +// Valid types for AudienceMatchPolicyType +const ( + // MatchAny means the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + AudienceMatchPolicyMatchAny AudienceMatchPolicyType = "MatchAny" +) + +// ClaimValidationRule provides the configuration for a single claim validation rule. +type ClaimValidationRule struct { + // claim is the name of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim keys are supported. + // Mutually exclusive with expression and message. + // +optional + Claim string `json:"claim,omitempty"` + // requiredValue is the value of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim values are supported. + // If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + // Mutually exclusive with expression and message. + // +optional + RequiredValue string `json:"requiredValue,omitempty"` + + // expression represents the expression which will be evaluated by CEL. + // Must produce a boolean. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + // Must return true for the validation to pass. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim and requiredValue. + // +optional + Expression string `json:"expression,omitempty"` + // message customizes the returned error message when expression returns false. + // message is a literal string. + // Mutually exclusive with claim and requiredValue. + // +optional + Message string `json:"message,omitempty"` +} + +// ClaimMappings provides the configuration for claim mapping +type ClaimMappings struct { + // username represents an option for the username attribute. + // The claim's value must be a singular string. + // Same as the --oidc-username-claim and --oidc-username-prefix flags. + // If username.expression is set, the expression must produce a string value. + // If username.expression uses 'claims.email', then 'claims.email_verified' must be used in + // username.expression or extra[*].valueExpression or claimValidationRules[*].expression. + // An example claim validation rule expression that matches the validation automatically + // applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true)'. 
+ //
+ // In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set,
+ // the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly.
+ // For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config.
+ // For prefix:
+ // (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config,
+ // set username.prefix=""
+ // (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same
+ // behavior using authentication config, set username.prefix="<value of issuer.url>#"
+ // (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>"
+ // +required
+ Username PrefixedClaimOrExpression `json:"username"`
+ // groups represents an option for the groups attribute.
+ // The claim's value must be a string or string array claim.
+ // If groups.claim is set, the prefix must be specified (and can be the empty string).
+ // If groups.expression is set, the expression must produce a string or string array value.
+ // "", [], and null values are treated as the group mapping not being present.
+ // +optional
+ Groups PrefixedClaimOrExpression `json:"groups,omitempty"`
+
+ // uid represents an option for the uid attribute.
+ // Claim must be a singular string claim.
+ // If uid.expression is set, the expression must produce a string value.
+ // +optional
+ UID ClaimOrExpression `json:"uid"`
+
+ // extra represents an option for the extra attribute.
+ // expression must produce a string or string array value.
+ // If the value is empty, the extra mapping will not be present.
+ //
+ // hard-coded extra key/value
+ // - key: "foo"
+ // valueExpression: "'bar'"
+ // This will result in an extra attribute - foo: ["bar"]
+ //
+ // hard-coded key, value copying claim value
+ // - key: "foo"
+ // valueExpression: "claims.some_claim"
+ // This will result in an extra attribute - foo: [value of some_claim]
+ //
+ // hard-coded key, value derived from claim value
+ // - key: "admin"
+ // valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""'
+ // This will result in:
+ // - if is_admin claim is present and true, extra attribute - admin: ["true"]
+ // - if is_admin claim is present and false or is_admin claim is not present, no extra attribute will be added
+ //
+ // +optional
+ Extra []ExtraMapping `json:"extra,omitempty"`
+}
+
+// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression.
+type PrefixedClaimOrExpression struct {
+ // claim is the JWT claim to use.
+ // Mutually exclusive with expression.
+ // +optional
+ Claim string `json:"claim,omitempty"`
+ // prefix is prepended to claim's value to prevent clashes with existing names.
+ // prefix needs to be set if claim is set and can be the empty string.
+ // Mutually exclusive with expression.
+ // +optional
+ Prefix *string `json:"prefix,omitempty"`
+
+ // expression represents the expression which will be evaluated by CEL.
+ //
+ // CEL expressions have access to the contents of the token claims, organized into CEL variable:
+ // - 'claims' is a map of claim names to claim values.
+ // For example, a variable named 'sub' can be accessed as 'claims.sub'.
+ // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
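Tying the pieces together, a minimal JWTAuthenticator wired up in Go might look like the sketch below; the issuer URL, audience, prefix, and CEL expression are placeholders, not values mandated by this API:

package v1alpha1

func exampleJWTAuthenticator() JWTAuthenticator {
	prefix := "oidc:"
	return JWTAuthenticator{
		Issuer: Issuer{
			URL:       "https://issuer.example.com", // must match the token's "iss" claim
			Audiences: []string{"my-cluster"},        // matched against "aud"
		},
		ClaimMappings: ClaimMappings{
			// Usernames become "oidc:<sub>", avoiding clashes with existing users.
			Username: PrefixedClaimOrExpression{Claim: "sub", Prefix: &prefix},
		},
		ClaimValidationRules: []ClaimValidationRule{{
			// CEL over the token claims; must evaluate to true to authenticate.
			Expression: "claims.exp - claims.nbf <= 86400",
			Message:    "total token lifetime must not exceed 24 hours",
		}},
	}
}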
+ // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim and prefix. + // +optional + Expression string `json:"expression,omitempty"` +} + +// ClaimOrExpression provides the configuration for a single claim or expression. +type ClaimOrExpression struct { + // claim is the JWT claim to use. + // Either claim or expression must be set. + // Mutually exclusive with expression. + // +optional + Claim string `json:"claim,omitempty"` + + // expression represents the expression which will be evaluated by CEL. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim. + // +optional + Expression string `json:"expression,omitempty"` +} + +// ExtraMapping provides the configuration for a single extra mapping. +type ExtraMapping struct { + // key is a string to use as the extra attribute key. + // key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + // subdomain as defined by RFC 1123. All characters trailing the first "/" must + // be valid HTTP Path characters as defined by RFC 3986. + // key must be lowercase. + // Required to be unique. + // +required + Key string `json:"key"` + + // valueExpression is a CEL expression to extract extra attribute value. + // valueExpression must produce a string or string array value. + // "", [], and null values are treated as the extra mapping not being present. + // Empty string values contained within a string array are filtered out. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // +required + ValueExpression string `json:"valueExpression"` +} + +// UserValidationRule provides the configuration for a single user info validation rule. +type UserValidationRule struct { + // expression represents the expression which will be evaluated by CEL. + // Must return true for the validation to pass. + // + // CEL expressions have access to the contents of UserInfo, organized into CEL variable: + // - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + // Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + // API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // +required + Expression string `json:"expression"` + + // message customizes the returned error message when rule returns false. + // message is a literal string. 
+ // +optional + Message string `json:"message,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type string `json:"type"` + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string `json:"name"` + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + // Must not be defined when Type!=Webhook + Webhook *WebhookConfiguration `json:"webhook,omitempty"` +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration `json:"authorizedTTL"` + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"` + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration `json:"timeout"` + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. + // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"` + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"` + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. + FailurePolicy string `json:"failurePolicy"` + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"` + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. 
+ // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition `json:"matchConditions"` +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string `json:"type"` + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string `json:"kubeConfigFile"` +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string `json:"expression"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go new file mode 100644 index 00000000..3a6c66c3 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,964 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
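For reference, the sketch below assembles these authorization types into a single webhook authorizer; every name, path, and expression is a placeholder chosen for illustration:

package v1alpha1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleAuthzConfig() *AuthorizationConfiguration {
	kubeconfig := "/etc/kubernetes/authz-webhook.kubeconfig"
	return &AuthorizationConfiguration{
		Authorizers: []AuthorizerConfiguration{{
			Type: string(TypeWebhook),
			Name: "example-webhook",
			Webhook: &WebhookConfiguration{
				AuthorizedTTL:              metav1.Duration{Duration: 5 * time.Minute},
				UnauthorizedTTL:            metav1.Duration{Duration: 30 * time.Second},
				Timeout:                    metav1.Duration{Duration: 3 * time.Second},
				SubjectAccessReviewVersion: "v1",
				MatchConditionSubjectAccessReviewVersion: "v1",
				// On a CEL evaluation error the webhook is skipped rather
				// than denying the request.
				FailurePolicy: FailurePolicyNoOpinion,
				ConnectionInfo: WebhookConnectionInfo{
					Type:           AuthorizationWebhookConnectionInfoTypeKubeConfigFile,
					KubeConfigFile: &kubeconfig,
				},
				MatchConditions: []WebhookMatchCondition{{
					// Skip this webhook for kube-system service accounts.
					Expression: "!('system:serviceaccounts:kube-system' in request.groups)",
				}},
			},
		}},
	}
}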
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AdmissionConfiguration)(nil), (*apiserver.AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(a.(*AdmissionConfiguration), b.(*apiserver.AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionConfiguration)(nil), (*AdmissionConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(a.(*apiserver.AdmissionConfiguration), b.(*AdmissionConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AdmissionPluginConfiguration)(nil), (*apiserver.AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(a.(*AdmissionPluginConfiguration), b.(*apiserver.AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AdmissionPluginConfiguration)(nil), (*AdmissionPluginConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(a.(*apiserver.AdmissionPluginConfiguration), b.(*AdmissionPluginConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthenticationConfiguration)(nil), (*AuthenticationConfiguration)(nil), func(a, b 
interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(a.(*apiserver.AuthenticationConfiguration), b.(*AuthenticationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimMappings)(nil), (*apiserver.ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(a.(*ClaimMappings), b.(*apiserver.ClaimMappings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimMappings)(nil), (*ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(a.(*apiserver.ClaimMappings), b.(*ClaimMappings), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimOrExpression)(nil), (*apiserver.ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(a.(*ClaimOrExpression), b.(*apiserver.ClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimOrExpression)(nil), (*ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(a.(*apiserver.ClaimOrExpression), b.(*ClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ClaimValidationRule)(nil), (*apiserver.ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(a.(*ClaimValidationRule), b.(*apiserver.ClaimValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ClaimValidationRule)(nil), (*ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(a.(*apiserver.ClaimValidationRule), b.(*ClaimValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Connection)(nil), (*Connection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Connection_To_v1alpha1_Connection(a.(*apiserver.Connection), b.(*Connection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*EgressSelectorConfiguration)(nil), (*apiserver.EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(a.(*EgressSelectorConfiguration), b.(*apiserver.EgressSelectorConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelectorConfiguration)(nil), (*EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(a.(*apiserver.EgressSelectorConfiguration), b.(*EgressSelectorConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*ExtraMapping)(nil), (*apiserver.ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(a.(*ExtraMapping), b.(*apiserver.ExtraMapping), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.ExtraMapping)(nil), (*ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(a.(*apiserver.ExtraMapping), b.(*ExtraMapping), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Issuer)(nil), (*apiserver.Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Issuer_To_apiserver_Issuer(a.(*Issuer), b.(*apiserver.Issuer), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Issuer)(nil), (*Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Issuer_To_v1alpha1_Issuer(a.(*apiserver.Issuer), b.(*Issuer), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*JWTAuthenticator)(nil), (*apiserver.JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(a.(*JWTAuthenticator), b.(*apiserver.JWTAuthenticator), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.JWTAuthenticator)(nil), (*JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(a.(*apiserver.JWTAuthenticator), b.(*JWTAuthenticator), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*PrefixedClaimOrExpression)(nil), (*apiserver.PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(a.(*PrefixedClaimOrExpression), b.(*apiserver.PrefixedClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.PrefixedClaimOrExpression)(nil), (*PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(a.(*apiserver.PrefixedClaimOrExpression), b.(*PrefixedClaimOrExpression), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TCPTransport)(nil), (*TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(a.(*apiserver.TCPTransport), b.(*TCPTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TLSConfig)(nil), (*apiserver.TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(a.(*TLSConfig), b.(*apiserver.TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TLSConfig)(nil), (*TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(a.(*apiserver.TLSConfig), b.(*TLSConfig), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*apiserver.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(a.(*TracingConfiguration), b.(*apiserver.TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(a.(*apiserver.TracingConfiguration), b.(*TracingConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*Transport)(nil), (*apiserver.Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_Transport_To_apiserver_Transport(a.(*Transport), b.(*apiserver.Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.Transport)(nil), (*Transport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_Transport_To_v1alpha1_Transport(a.(*apiserver.Transport), b.(*Transport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*UDSTransport)(nil), (*apiserver.UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(a.(*UDSTransport), b.(*apiserver.UDSTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.UDSTransport)(nil), (*UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(a.(*apiserver.UDSTransport), b.(*UDSTransport), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*UserValidationRule)(nil), (*apiserver.UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(a.(*UserValidationRule), b.(*apiserver.UserValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.UserValidationRule)(nil), (*UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(a.(*apiserver.UserValidationRule), b.(*UserValidationRule), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope)
+	}); err != nil {
+		return err
+	}
+	return nil
+}
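[Editor's illustration, not part of the vendored file: RegisterConversions above wires every generated pair into a runtime.Scheme, after which the scheme dispatches Convert calls by type pair. A minimal sketch of exercising it; the import aliases and the sample TLSConfig value are assumptions.]

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
	v1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
)

func main() {
	// Register all generated conversion functions on a fresh scheme.
	scheme := runtime.NewScheme()
	if err := v1alpha1.RegisterConversions(scheme); err != nil {
		panic(err)
	}
	// The scheme looks up the registered func for (in, out) and invokes it,
	// here Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig.
	in := &v1alpha1.TLSConfig{CABundle: "/etc/ca.crt"} // sample value
	out := &apiserver.TLSConfig{}
	if err := scheme.Convert(in, out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out.CABundle)
}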
+
+func autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
+	out.Plugins = *(*[]apiserver.AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins))
+	return nil
+}
+
+// Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
+	out.Plugins = *(*[]AdmissionPluginConfiguration)(unsafe.Pointer(&in.Plugins))
+	return nil
+}
+
+// Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Path = in.Path
+	out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration))
+	return nil
+}
+
+// Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
+	out.Name = in.Name
+	out.Path = in.Path
+	out.Configuration = (*runtime.Unknown)(unsafe.Pointer(in.Configuration))
+	return nil
+}
+
+// Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
+	out.Path = in.Path
+	return nil
+}
+
+// Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function.
+func Convert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s)
+}
+
+func autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
+	out.Path = in.Path
+	return nil
+}
+
+// Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition is an autogenerated conversion function.
+func Convert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthCondition_To_v1alpha1_AnonymousAuthCondition(in, out, s)
+}
+
+func autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthConfig_To_v1alpha1_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]apiserver.JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthenticationConfiguration_To_v1alpha1_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizationConfiguration_To_v1alpha1_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = apiserver.AuthorizerType(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = string(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizerConfiguration_To_v1alpha1_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]apiserver.ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings is an autogenerated conversion function.
+func Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings is an autogenerated conversion function.
+func Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression is an autogenerated conversion function.
+func Convert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimOrExpression_To_v1alpha1_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule is an autogenerated conversion function.
+func Convert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule is an autogenerated conversion function.
+func Convert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimValidationRule_To_v1alpha1_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol)
+	out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_v1alpha1_Connection_To_apiserver_Connection is an autogenerated conversion function.
+func Convert_v1alpha1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Connection_To_apiserver_Connection(in, out, s)
+}
+
+func autoConvert_apiserver_Connection_To_v1alpha1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	out.ProxyProtocol = ProtocolType(in.ProxyProtocol)
+	out.Transport = (*Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_apiserver_Connection_To_v1alpha1_Connection is an autogenerated conversion function.
+func Convert_apiserver_Connection_To_v1alpha1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	return autoConvert_apiserver_Connection_To_v1alpha1_Connection(in, out, s)
+}
+
+func autoConvert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_v1alpha1_Connection_To_apiserver_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_apiserver_Connection_To_v1alpha1_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection is an autogenerated conversion function.
+func Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(in, out, s)
+}
+
+func autoConvert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]apiserver.EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_EgressSelection_To_v1alpha1_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelectorConfiguration_To_v1alpha1_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping is an autogenerated conversion function.
+func Convert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	return autoConvert_v1alpha1_ExtraMapping_To_apiserver_ExtraMapping(in, out, s)
+}
+
+func autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping is an autogenerated conversion function.
+func Convert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	return autoConvert_apiserver_ExtraMapping_To_v1alpha1_ExtraMapping(in, out, s)
+}
+
+func autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_Pointer_string_To_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = apiserver.AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_v1alpha1_Issuer_To_apiserver_Issuer is an autogenerated conversion function.
+func Convert_v1alpha1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Issuer_To_apiserver_Issuer(in, out, s)
+}
+
+func autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_string_To_Pointer_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_apiserver_Issuer_To_v1alpha1_Issuer is an autogenerated conversion function.
+func Convert_apiserver_Issuer_To_v1alpha1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	return autoConvert_apiserver_Issuer_To_v1alpha1_Issuer(in, out, s)
+}
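[Editor's illustration, not part of the vendored file: throughout these functions, conversion-gen emits a zero-copy `unsafe.Pointer` cast where source and destination are memory-identical (e.g. `Audiences []string`), and explicit field code where they differ (e.g. `DiscoveryURL`, a `*string` in v1alpha1 but a plain `string` internally, goes through `v1.Convert_Pointer_string_To_string`). A minimal sketch of both patterns with hypothetical local types:]

package main

import (
	"fmt"
	"unsafe"
)

// Two element types with identical memory layout (hypothetical).
type externalRule struct{ Expression string }
type internalRule struct{ Expression string }

func main() {
	ext := []externalRule{{Expression: "claims.iss == 'https://issuer'"}}
	// Zero-copy reinterpretation: valid only because layouts match exactly;
	// this is the same trick as the generated slice casts above.
	intl := *(*[]internalRule)(unsafe.Pointer(&ext))
	fmt.Println(intl[0].Expression)

	// Pointer-to-value lowering, as Convert_Pointer_string_To_string does:
	// a nil pointer becomes the zero value.
	var discoveryURL *string
	out := ""
	if discoveryURL != nil {
		out = *discoveryURL
	}
	fmt.Printf("%q\n", out)
}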
+
+func autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_v1alpha1_Issuer_To_apiserver_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]apiserver.ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_v1alpha1_ClaimMappings_To_apiserver_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]apiserver.UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator is an autogenerated conversion function.
+func Convert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_v1alpha1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_apiserver_Issuer_To_v1alpha1_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_apiserver_ClaimMappings_To_v1alpha1_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator is an autogenerated conversion function.
+func Convert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_apiserver_JWTAuthenticator_To_v1alpha1_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1alpha1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_PrefixedClaimOrExpression_To_v1alpha1_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport is an autogenerated conversion function.
+func Convert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TCPTransport_To_apiserver_TCPTransport(in, out, s)
+}
+
+func autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport is an autogenerated conversion function.
+func Convert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_TCPTransport_To_v1alpha1_TCPTransport(in, out, s)
+}
+
+func autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig is an autogenerated conversion function.
+func Convert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TLSConfig_To_apiserver_TLSConfig(in, out, s)
+}
+
+func autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig is an autogenerated conversion function.
+func Convert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_TLSConfig_To_v1alpha1_TLSConfig(in, out, s)
+}
+
+func autoConvert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_TracingConfiguration_To_apiserver_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration is an autogenerated conversion function.
+func Convert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_TracingConfiguration_To_v1alpha1_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	out.TCP = (*apiserver.TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*apiserver.UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_v1alpha1_Transport_To_apiserver_Transport is an autogenerated conversion function.
+func Convert_v1alpha1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_Transport_To_apiserver_Transport(in, out, s)
+}
+
+func autoConvert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	out.TCP = (*TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_apiserver_Transport_To_v1alpha1_Transport is an autogenerated conversion function.
+func Convert_apiserver_Transport_To_v1alpha1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	return autoConvert_apiserver_Transport_To_v1alpha1_Transport(in, out, s)
+}
+
+func autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport is an autogenerated conversion function.
+func Convert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	return autoConvert_v1alpha1_UDSTransport_To_apiserver_UDSTransport(in, out, s)
+}
+
+func autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport is an autogenerated conversion function.
+func Convert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_UDSTransport_To_v1alpha1_UDSTransport(in, out, s)
+}
+
+func autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule is an autogenerated conversion function.
+func Convert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	return autoConvert_v1alpha1_UserValidationRule_To_apiserver_UserValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule is an autogenerated conversion function.
+func Convert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_UserValidationRule_To_v1alpha1_UserValidationRule(in, out, s)
+}
+
+func autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function.
+func Convert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_v1alpha1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration is an autogenerated conversion function.
+func Convert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConfiguration_To_v1alpha1_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_v1alpha1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConnectionInfo_To_v1alpha1_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(in, out, s)
+}
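[Editor's illustration, not part of the vendored file: note that EgressSelection's v1alpha1-to-internal direction is the one pair registered above via AddConversionFunc rather than AddGeneratedConversionFunc; the public Convert_v1alpha1_EgressSelection_To_apiserver_EgressSelection is hand-written elsewhere in the package (not shown in this hunk), presumably to normalize legacy selection names. The generated Convert_* wrappers can also be called directly; a nil scope is fine for functions that never consult it, as in this round-trip sketch:]

package main

import (
	"fmt"
	"reflect"

	apiserver "k8s.io/apiserver/pkg/apis/apiserver"
	v1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
)

func main() {
	// external -> internal -> external should be lossless for shared fields.
	orig := &v1alpha1.WebhookMatchCondition{Expression: "request.user != 'system:anonymous'"} // sample value
	internal := &apiserver.WebhookMatchCondition{}
	back := &v1alpha1.WebhookMatchCondition{}
	if err := v1alpha1.Convert_v1alpha1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(orig, internal, nil); err != nil {
		panic(err)
	}
	if err := v1alpha1.Convert_apiserver_WebhookMatchCondition_To_v1alpha1_WebhookMatchCondition(internal, back, nil); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(orig, back)) // true
}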
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..81b65225
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,606 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Plugins != nil {
+		in, out := &in.Plugins, &out.Plugins
+		*out = make([]AdmissionPluginConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration.
+func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) {
+	*out = *in
+	if in.Configuration != nil {
+		in, out := &in.Configuration, &out.Configuration
+		*out = new(runtime.Unknown)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration.
+func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AdmissionPluginConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
+func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]AnonymousAuthCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
+func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Anonymous != nil {
+		in, out := &in.Anonymous, &out.Anonymous
+		*out = new(AnonymousAuthConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration.
+func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthenticationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Authorizers != nil {
+		in, out := &in.Authorizers, &out.Authorizers
+		*out = make([]AuthorizerConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration.
+func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) {
+	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration.
+func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) {
+	*out = *in
+	in.Username.DeepCopyInto(&out.Username)
+	in.Groups.DeepCopyInto(&out.Groups)
+	out.UID = in.UID
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make([]ExtraMapping, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings.
+func (in *ClaimMappings) DeepCopy() *ClaimMappings {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimMappings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression.
+func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule.
+func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Connection) DeepCopyInto(out *Connection) {
+	*out = *in
+	if in.Transport != nil {
+		in, out := &in.Transport, &out.Transport
+		*out = new(Transport)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
+func (in *Connection) DeepCopy() *Connection {
+	if in == nil {
+		return nil
+	}
+	out := new(Connection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelection) DeepCopyInto(out *EgressSelection) {
+	*out = *in
+	in.Connection.DeepCopyInto(&out.Connection)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection.
+func (in *EgressSelection) DeepCopy() *EgressSelection {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration.
+func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelectorConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
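[Editor's illustration, not part of the vendored file: the DeepCopyObject methods are what make the top-level configuration types satisfy runtime.Object, so generic machinery can clone them without knowing the concrete type. A small sketch; copyAny is a hypothetical helper and the sample value is an assumption.]

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	v1alpha1 "k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
)

// copyAny works on any runtime.Object because DeepCopyObject is part of that interface.
func copyAny(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}

func main() {
	cfg := &v1alpha1.EgressSelectorConfiguration{
		EgressSelections: []v1alpha1.EgressSelection{{Name: "cluster"}},
	}
	// Polymorphic deep copy, then assert back to the concrete type.
	clone := copyAny(cfg).(*v1alpha1.EgressSelectorConfiguration)
	fmt.Println(clone.EgressSelections[0].Name)
}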
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping.
+func (in *ExtraMapping) DeepCopy() *ExtraMapping {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraMapping)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Issuer) DeepCopyInto(out *Issuer) {
+	*out = *in
+	if in.DiscoveryURL != nil {
+		in, out := &in.DiscoveryURL, &out.DiscoveryURL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer.
+func (in *Issuer) DeepCopy() *Issuer {
+	if in == nil {
+		return nil
+	}
+	out := new(Issuer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) {
+	*out = *in
+	in.Issuer.DeepCopyInto(&out.Issuer)
+	if in.ClaimValidationRules != nil {
+		in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+		*out = make([]ClaimValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
+	if in.UserValidationRules != nil {
+		in, out := &in.UserValidationRules, &out.UserValidationRules
+		*out = make([]UserValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator.
+func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(JWTAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) {
+	*out = *in
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression.
+func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(PrefixedClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.TracingConfiguration.DeepCopyInto(&out.TracingConfiguration)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TracingConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule.
+func (in *UserValidationRule) DeepCopy() *UserValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) {
+	*out = *in
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo)
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]WebhookMatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration.
+func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) {
+	*out = *in
+	if in.KubeConfigFile != nil {
+		in, out := &in.KubeConfigFile, &out.KubeConfigFile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo.
+func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition.
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookMatchCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition { + if in == nil { + return nil + } + out := new(WebhookMatchCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go new file mode 100644 index 00000000..fc76be0f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,43 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) }) + return nil +} + +func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) { + for i := range in.Authorizers { + a := &in.Authorizers[i] + if a.Webhook != nil { + SetDefaults_WebhookConfiguration(a.Webhook) + } + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go new file mode 100644 index 00000000..d7110eff --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/conversion.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error { + if err := autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in, out, s); err != nil { + return err + } + if out.Name == "master" { + out.Name = "controlplane" + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/defaults.go new file mode 100644 index 00000000..eebcb6c0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/defaults.go @@ -0,0 +1,36 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} + +func SetDefaults_WebhookConfiguration(obj *WebhookConfiguration) { + if obj.AuthorizedTTL.Duration == 0 { + obj.AuthorizedTTL.Duration = 5 * time.Minute + } + if obj.UnauthorizedTTL.Duration == 0 { + obj.UnauthorizedTTL.Duration = 30 * time.Second + } +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go new file mode 100644 index 00000000..07321e77 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/apiserver +// +k8s:defaulter-gen=TypeMeta +// +groupName=apiserver.k8s.io + +// Package v1beta1 is the v1beta1 version of the API. +package v1beta1 // import "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go new file mode 100644 index 00000000..3718a85d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
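A minimal runnable sketch of the defaulting behavior above: zero-valued durations receive the documented 5m/30s defaults, while explicitly set values are left alone.

package main

import (
	"fmt"

	apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
)

func main() {
	cfg := &apiserverv1beta1.WebhookConfiguration{}
	apiserverv1beta1.SetDefaults_WebhookConfiguration(cfg)
	fmt.Println(cfg.AuthorizedTTL.Duration)   // 5m0s
	fmt.Println(cfg.UnauthorizedTTL.Duration) // 30s
}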
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "apiserver.k8s.io" +const ConfigGroupName = "apiserver.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// ConfigSchemeGroupVersion is group version used to register these objects +var ConfigSchemeGroupVersion = schema.GroupVersion{Group: ConfigGroupName, Version: "v1beta1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &EgressSelectorConfiguration{}, + ) + scheme.AddKnownTypes(ConfigSchemeGroupVersion, + &AuthenticationConfiguration{}, + &AuthorizationConfiguration{}, + &TracingConfiguration{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go new file mode 100644 index 00000000..570f3c46 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/types.go @@ -0,0 +1,591 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + tracingapi "k8s.io/component-base/tracing/api/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressSelectorConfiguration provides versioned configuration for egress selector clients. +type EgressSelectorConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // connectionServices contains a list of egress selection client configurations + EgressSelections []EgressSelection `json:"egressSelections"` +} + +// EgressSelection provides the configuration for a single egress selection client. +type EgressSelection struct { + // name is the name of the egress selection. 
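As a sketch of how a consumer might use the scheme wiring above to decode a serialized config, here is one plausible setup; the codec plumbing is standard apimachinery and the JSON payload is illustrative only:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := apiserverv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	codecs := serializer.NewCodecFactory(scheme)
	// AuthorizationConfiguration is registered under apiserver.config.k8s.io/v1beta1.
	raw := []byte(`{"apiVersion":"apiserver.config.k8s.io/v1beta1","kind":"AuthorizationConfiguration","authorizers":[{"type":"Webhook","name":"example"}]}`)
	obj, _, err := codecs.UniversalDecoder(apiserverv1beta1.ConfigSchemeGroupVersion).Decode(raw, nil, nil)
	if err != nil {
		panic(err)
	}
	cfg := obj.(*apiserverv1beta1.AuthorizationConfiguration)
	fmt.Println(cfg.Authorizers[0].Name) // "example"
}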
+ // Currently supported values are "controlplane", "master", "etcd" and "cluster" + // The "master" egress selector is deprecated in favor of "controlplane" + Name string `json:"name"` + + // connection is the exact information used to configure the egress selection + Connection Connection `json:"connection"` +} + +// Connection provides the configuration for a single egress selection client. +type Connection struct { + // Protocol is the protocol used to connect from client to the konnectivity server. + ProxyProtocol ProtocolType `json:"proxyProtocol,omitempty"` + + // Transport defines the transport configurations we use to dial to the konnectivity server. + // This is required if ProxyProtocol is HTTPConnect or GRPC. + // +optional + Transport *Transport `json:"transport,omitempty"` +} + +// ProtocolType is a set of valid values for Connection.ProtocolType +type ProtocolType string + +// Valid types for ProtocolType for konnectivity server +const ( + // Use HTTPConnect to connect to konnectivity server + ProtocolHTTPConnect ProtocolType = "HTTPConnect" + // Use grpc to connect to konnectivity server + ProtocolGRPC ProtocolType = "GRPC" + // Connect directly (skip konnectivity server) + ProtocolDirect ProtocolType = "Direct" +) + +// Transport defines the transport configurations we use to dial to the konnectivity server +type Transport struct { + // TCP is the TCP configuration for communicating with the konnectivity server via TCP + // ProxyProtocol of GRPC is not supported with TCP transport at the moment + // Requires at least one of TCP or UDS to be set + // +optional + TCP *TCPTransport `json:"tcp,omitempty"` + + // UDS is the UDS configuration for communicating with the konnectivity server via UDS + // Requires at least one of TCP or UDS to be set + // +optional + UDS *UDSTransport `json:"uds,omitempty"` +} + +// TCPTransport provides the information to connect to konnectivity server via TCP +type TCPTransport struct { + // URL is the location of the konnectivity server to connect to. + // As an example it might be "https://127.0.0.1:8131" + URL string `json:"url,omitempty"` + + // TLSConfig is the config needed to use TLS when connecting to konnectivity server + // +optional + TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` +} + +// UDSTransport provides the information to connect to konnectivity server via UDS +type UDSTransport struct { + // UDSName is the name of the unix domain socket to connect to konnectivity server + // This does not use a unix:// prefix. (Eg: /etc/srv/kubernetes/konnectivity-server/konnectivity-server.socket) + UDSName string `json:"udsName,omitempty"` +} + +// TLSConfig provides the authentication information to connect to konnectivity server +// Only used with TCPTransport +type TLSConfig struct { + // caBundle is the file location of the CA to be used to determine trust with the konnectivity server. + // Must be absent/empty if TCPTransport.URL is prefixed with http:// + // If absent while TCPTransport.URL is prefixed with https://, default to system trust roots. + // +optional + CABundle string `json:"caBundle,omitempty"` + + // clientKey is the file location of the client key to be used in mtls handshakes with the konnectivity server. 
+ // Must be absent/empty if TCPTransport.URL is prefixed with http://
+ // Must be configured if TCPTransport.URL is prefixed with https://
+ // +optional
+ ClientKey string `json:"clientKey,omitempty"`
+
+ // clientCert is the file location of the client certificate to be used in mtls handshakes with the konnectivity server.
+ // Must be absent/empty if TCPTransport.URL is prefixed with http://
+ // Must be configured if TCPTransport.URL is prefixed with https://
+ // +optional
+ ClientCert string `json:"clientCert,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TracingConfiguration provides versioned configuration for tracing clients.
+type TracingConfiguration struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Embed the component config tracing configuration struct
+ tracingapi.TracingConfiguration `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AuthenticationConfiguration provides versioned configuration for authentication.
+type AuthenticationConfiguration struct {
+ metav1.TypeMeta
+
+ // jwt is a list of authenticators to authenticate Kubernetes users using
+ // JWT compliant tokens. Each authenticator will attempt to parse a raw ID token
+ // and verify that it has been signed by the configured issuer. The public key to verify the
+ // signature is discovered from the issuer's public endpoint using OIDC discovery.
+ // For an incoming token, each JWT authenticator will be attempted in
+ // the order in which it is specified in this list. Note however that
+ // other authenticators may run before or after the JWT authenticators.
+ // The specific position of JWT authenticators in relation to other
+ // authenticators is neither defined nor stable across releases. Since
+ // each JWT authenticator must have a unique issuer URL, at most one
+ // JWT authenticator will attempt to cryptographically validate the token.
+ //
+ // The minimum valid JWT payload must contain the following claims:
+ // {
+ // "iss": "https://issuer.example.com",
+ // "aud": ["audience"],
+ // "exp": 1234567890,
+ // "<username-claim>": "username"
+ // }
+ JWT []JWTAuthenticator `json:"jwt"`
+
+ // If present, --anonymous-auth must not be set.
+ Anonymous *AnonymousAuthConfig `json:"anonymous,omitempty"`
+}
+
+// AnonymousAuthConfig provides the configuration for the anonymous authenticator.
+type AnonymousAuthConfig struct {
+ Enabled bool `json:"enabled"`
+
+ // If set, anonymous auth is only allowed if the request meets one of the
+ // conditions.
+ Conditions []AnonymousAuthCondition `json:"conditions,omitempty"`
+}
+
+// AnonymousAuthCondition describes the condition under which anonymous auth
+// should be enabled.
+type AnonymousAuthCondition struct {
+ // Path for which anonymous auth is enabled.
+ Path string `json:"path"`
+}
+
+// JWTAuthenticator provides the configuration for a single JWT authenticator.
+type JWTAuthenticator struct {
+ // issuer contains the basic OIDC provider connection options.
+ // +required
+ Issuer Issuer `json:"issuer"`
+
+ // claimValidationRules are rules that are applied to validate token claims to authenticate users.
+ // +optional
+ ClaimValidationRules []ClaimValidationRule `json:"claimValidationRules,omitempty"`
+
+ // claimMappings points claims of a token to be treated as user attributes.
+ // +required
+ ClaimMappings ClaimMappings `json:"claimMappings"`
+
+ // userValidationRules are rules that are applied to the final user before completing authentication.
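To make the egress Transport and TLSConfig rules above concrete, a hedged sketch of a complete selection; the certificate paths are placeholders, and an https:// URL requires the client cert/key per the comments above:

package example

import apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"

// exampleEgressConfig routes "cluster" traffic through a konnectivity
// server dialed over TCP with mutual TLS.
func exampleEgressConfig() *apiserverv1beta1.EgressSelectorConfiguration {
	return &apiserverv1beta1.EgressSelectorConfiguration{
		EgressSelections: []apiserverv1beta1.EgressSelection{{
			Name: "cluster",
			Connection: apiserverv1beta1.Connection{
				ProxyProtocol: apiserverv1beta1.ProtocolHTTPConnect,
				Transport: &apiserverv1beta1.Transport{
					TCP: &apiserverv1beta1.TCPTransport{
						URL: "https://127.0.0.1:8131",
						TLSConfig: &apiserverv1beta1.TLSConfig{
							// placeholder paths, not a recommendation
							CABundle:   "/etc/kubernetes/pki/konnectivity-ca.crt",
							ClientCert: "/etc/kubernetes/pki/konnectivity-client.crt",
							ClientKey:  "/etc/kubernetes/pki/konnectivity-client.key",
						},
					},
				},
			},
		}},
	}
}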
+ // These allow invariants to be applied to incoming identities such as preventing the + // use of the system: prefix that is commonly used by Kubernetes components. + // The validation rules are logically ANDed together and must all return true for the validation to pass. + // +optional + UserValidationRules []UserValidationRule `json:"userValidationRules,omitempty"` +} + +// Issuer provides the configuration for an external provider's specific settings. +type Issuer struct { + // url points to the issuer URL in a format https://url or https://url/path. + // This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + // Same value as the --oidc-issuer-url flag. + // Discovery information is fetched from "{url}/.well-known/openid-configuration" unless overridden by discoveryURL. + // Required to be unique across all JWT authenticators. + // Note that egress selection configuration is not used for this network connection. + // +required + URL string `json:"url"` + + // discoveryURL, if specified, overrides the URL used to fetch discovery + // information instead of using "{url}/.well-known/openid-configuration". + // The exact value specified is used, so "/.well-known/openid-configuration" + // must be included in discoveryURL if needed. + // + // The "issuer" field in the fetched discovery information must match the "issuer.url" field + // in the AuthenticationConfiguration and will be used to validate the "iss" claim in the presented JWT. + // This is for scenarios where the well-known and jwks endpoints are hosted at a different + // location than the issuer (such as locally in the cluster). + // + // Example: + // A discovery url that is exposed using kubernetes service 'oidc' in namespace 'oidc-namespace' + // and discovery information is available at '/.well-known/openid-configuration'. + // discoveryURL: "https://oidc.oidc-namespace/.well-known/openid-configuration" + // certificateAuthority is used to verify the TLS connection and the hostname on the leaf certificate + // must be set to 'oidc.oidc-namespace'. + // + // curl https://oidc.oidc-namespace/.well-known/openid-configuration (.discoveryURL field) + // { + // issuer: "https://oidc.example.com" (.url field) + // } + // + // discoveryURL must be different from url. + // Required to be unique across all JWT authenticators. + // Note that egress selection configuration is not used for this network connection. + // +optional + DiscoveryURL *string `json:"discoveryURL,omitempty"` + + // certificateAuthority contains PEM-encoded certificate authority certificates + // used to validate the connection when fetching discovery information. + // If unset, the system verifier is used. + // Same value as the content of the file referenced by the --oidc-ca-file flag. + // +optional + CertificateAuthority string `json:"certificateAuthority,omitempty"` + + // audiences is the set of acceptable audiences the JWT must be issued to. + // At least one of the entries must match the "aud" claim in presented JWTs. + // Same value as the --oidc-client-id flag (though this field supports an array). + // Required to be non-empty. + // +required + Audiences []string `json:"audiences"` + + // audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + // Allowed values are: + // 1. "MatchAny" when multiple audiences are specified and + // 2. empty (or unset) or "MatchAny" when a single audience is specified. 
+ // + // - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + // For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + // + // - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + // + // For more nuanced audience validation, use claimValidationRules. + // example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. + // +optional + AudienceMatchPolicy AudienceMatchPolicyType `json:"audienceMatchPolicy,omitempty"` +} + +// AudienceMatchPolicyType is a set of valid values for issuer.audienceMatchPolicy +type AudienceMatchPolicyType string + +// Valid types for AudienceMatchPolicyType +const ( + // MatchAny means the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + AudienceMatchPolicyMatchAny AudienceMatchPolicyType = "MatchAny" +) + +// ClaimValidationRule provides the configuration for a single claim validation rule. +type ClaimValidationRule struct { + // claim is the name of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim keys are supported. + // Mutually exclusive with expression and message. + // +optional + Claim string `json:"claim,omitempty"` + // requiredValue is the value of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim values are supported. + // If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + // Mutually exclusive with expression and message. + // +optional + RequiredValue string `json:"requiredValue,omitempty"` + + // expression represents the expression which will be evaluated by CEL. + // Must produce a boolean. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + // Must return true for the validation to pass. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim and requiredValue. + // +optional + Expression string `json:"expression,omitempty"` + // message customizes the returned error message when expression returns false. + // message is a literal string. + // Mutually exclusive with claim and requiredValue. + // +optional + Message string `json:"message,omitempty"` +} + +// ClaimMappings provides the configuration for claim mapping +type ClaimMappings struct { + // username represents an option for the username attribute. + // The claim's value must be a singular string. + // Same as the --oidc-username-claim and --oidc-username-prefix flags. + // If username.expression is set, the expression must produce a string value. + // If username.expression uses 'claims.email', then 'claims.email_verified' must be used in + // username.expression or extra[*].valueExpression or claimValidationRules[*].expression. + // An example claim validation rule expression that matches the validation automatically + // applied when username.claim is set to 'email' is 'claims.?email_verified.orValue(true)'. 
+ //
+ // In the flag based approach, the --oidc-username-claim and --oidc-username-prefix flags are optional. If --oidc-username-claim is not set,
+ // the default value is "sub". For the authentication config, there is no defaulting for claim or prefix; the claim and prefix must be set explicitly.
+ // For claim, if --oidc-username-claim was not set with the legacy flag approach, configure username.claim="sub" in the authentication config.
+ // For prefix:
+ // (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config,
+ // set username.prefix=""
+ // (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "<value of --oidc-issuer-url>#". For the same
+ // behavior using authentication config, set username.prefix="<value of issuer.url>#"
+ // (3) --oidc-username-prefix="<value>". For the same behavior using authentication config, set username.prefix="<value>"
+ // +required
+ Username PrefixedClaimOrExpression `json:"username"`
+ // groups represents an option for the groups attribute.
+ // The claim's value must be a string or string array claim.
+ // If groups.claim is set, the prefix must be specified (and can be the empty string).
+ // If groups.expression is set, the expression must produce a string or string array value.
+ // "", [], and null values are treated as the group mapping not being present.
+ // +optional
+ Groups PrefixedClaimOrExpression `json:"groups,omitempty"`
+
+ // uid represents an option for the uid attribute.
+ // Claim must be a singular string claim.
+ // If uid.expression is set, the expression must produce a string value.
+ // +optional
+ UID ClaimOrExpression `json:"uid"`
+
+ // extra represents an option for the extra attribute.
+ // expression must produce a string or string array value.
+ // If the value is empty, the extra mapping will not be present.
+ //
+ // hard-coded extra key/value
+ // - key: "foo"
+ // valueExpression: "'bar'"
+ // This will result in an extra attribute - foo: ["bar"]
+ //
+ // hard-coded key, value copying claim value
+ // - key: "foo"
+ // valueExpression: "claims.some_claim"
+ // This will result in an extra attribute - foo: [value of some_claim]
+ //
+ // hard-coded key, value derived from claim value
+ // - key: "admin"
+ // valueExpression: '(has(claims.is_admin) && claims.is_admin) ? "true":""'
+ // This will result in:
+ // - if is_admin claim is present and true, extra attribute - admin: ["true"]
+ // - if is_admin claim is present and false, or is_admin claim is not present, no extra attribute will be added
+ //
+ // +optional
+ Extra []ExtraMapping `json:"extra,omitempty"`
+}
+
+// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression.
+type PrefixedClaimOrExpression struct {
+ // claim is the JWT claim to use.
+ // Mutually exclusive with expression.
+ // +optional
+ Claim string `json:"claim,omitempty"`
+ // prefix is prepended to the claim's value to prevent clashes with existing names.
+ // prefix needs to be set if claim is set, and can be the empty string.
+ // Mutually exclusive with expression.
+ // +optional
+ Prefix *string `json:"prefix,omitempty"`
+
+ // expression represents the expression which will be evaluated by CEL.
+ //
+ // CEL expressions have access to the contents of the token claims, organized into the CEL variable:
+ // - 'claims' is a map of claim names to claim values.
+ // For example, a variable named 'sub' can be accessed as 'claims.sub'.
+ // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'.
+ // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim and prefix. + // +optional + Expression string `json:"expression,omitempty"` +} + +// ClaimOrExpression provides the configuration for a single claim or expression. +type ClaimOrExpression struct { + // claim is the JWT claim to use. + // Either claim or expression must be set. + // Mutually exclusive with expression. + // +optional + Claim string `json:"claim,omitempty"` + + // expression represents the expression which will be evaluated by CEL. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // Mutually exclusive with claim. + // +optional + Expression string `json:"expression,omitempty"` +} + +// ExtraMapping provides the configuration for a single extra mapping. +type ExtraMapping struct { + // key is a string to use as the extra attribute key. + // key must be a domain-prefix path (e.g. example.org/foo). All characters before the first "/" must be a valid + // subdomain as defined by RFC 1123. All characters trailing the first "/" must + // be valid HTTP Path characters as defined by RFC 3986. + // key must be lowercase. + // Required to be unique. + // +required + Key string `json:"key"` + + // valueExpression is a CEL expression to extract extra attribute value. + // valueExpression must produce a string or string array value. + // "", [], and null values are treated as the extra mapping not being present. + // Empty string values contained within a string array are filtered out. + // + // CEL expressions have access to the contents of the token claims, organized into CEL variable: + // - 'claims' is a map of claim names to claim values. + // For example, a variable named 'sub' can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation, e.g. 'claims.foo.bar'. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // +required + ValueExpression string `json:"valueExpression"` +} + +// UserValidationRule provides the configuration for a single user info validation rule. +type UserValidationRule struct { + // expression represents the expression which will be evaluated by CEL. + // Must return true for the validation to pass. + // + // CEL expressions have access to the contents of UserInfo, organized into CEL variable: + // - 'user' - authentication.k8s.io/v1, Kind=UserInfo object + // Refer to https://github.com/kubernetes/api/blob/release-1.28/authentication/v1/types.go#L105-L122 for the definition. + // API documentation: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#userinfo-v1-authentication-k8s-io + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + // + // +required + Expression string `json:"expression"` + + // message customizes the returned error message when rule returns false. + // message is a literal string. 
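Pulling the authenticator pieces above together, a hedged sketch; the issuer URL, audience, claims, and CEL expressions are placeholders, not recommendations:

package example

import apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"

func exampleJWTAuthenticator() apiserverv1beta1.JWTAuthenticator {
	usernamePrefix := "oidc:" // prefix is required (may be "") whenever claim is set
	groupsPrefix := ""
	return apiserverv1beta1.JWTAuthenticator{
		Issuer: apiserverv1beta1.Issuer{
			URL:       "https://issuer.example.com",
			Audiences: []string{"my-client-id"},
		},
		ClaimMappings: apiserverv1beta1.ClaimMappings{
			Username: apiserverv1beta1.PrefixedClaimOrExpression{Claim: "sub", Prefix: &usernamePrefix},
			Groups:   apiserverv1beta1.PrefixedClaimOrExpression{Claim: "groups", Prefix: &groupsPrefix},
		},
		// claim/requiredValue form: the "hd" claim must equal "example.com".
		ClaimValidationRules: []apiserverv1beta1.ClaimValidationRule{{
			Claim:         "hd",
			RequiredValue: "example.com",
		}},
		// reject identities that collide with Kubernetes' reserved prefix
		UserValidationRules: []apiserverv1beta1.UserValidationRule{{
			Expression: `!user.username.startsWith("system:")`,
			Message:    "usernames may not use the reserved system: prefix",
		}},
	}
}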
+ // +optional + Message string `json:"message,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type AuthorizationConfiguration struct { + metav1.TypeMeta + + // Authorizers is an ordered list of authorizers to + // authorize requests against. + // This is similar to the --authorization-modes kube-apiserver flag + // Must be at least one. + Authorizers []AuthorizerConfiguration `json:"authorizers"` +} + +const ( + TypeWebhook AuthorizerType = "Webhook" + FailurePolicyNoOpinion string = "NoOpinion" + FailurePolicyDeny string = "Deny" + AuthorizationWebhookConnectionInfoTypeKubeConfigFile string = "KubeConfigFile" + AuthorizationWebhookConnectionInfoTypeInCluster string = "InClusterConfig" +) + +type AuthorizerType string + +type AuthorizerConfiguration struct { + // Type refers to the type of the authorizer + // "Webhook" is supported in the generic API server + // Other API servers may support additional authorizer + // types like Node, RBAC, ABAC, etc. + Type string `json:"type"` + + // Name used to describe the webhook + // This is explicitly used in monitoring machinery for metrics + // Note: Names must be DNS1123 labels like `myauthorizername` or + // subdomains like `myauthorizer.example.domain` + // Required, with no default + Name string `json:"name"` + + // Webhook defines the configuration for a Webhook authorizer + // Must be defined when Type=Webhook + // Must not be defined when Type!=Webhook + Webhook *WebhookConfiguration `json:"webhook,omitempty"` +} + +type WebhookConfiguration struct { + // The duration to cache 'authorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-authorized-ttl` flag + // Default: 5m0s + AuthorizedTTL metav1.Duration `json:"authorizedTTL"` + // The duration to cache 'unauthorized' responses from the webhook + // authorizer. + // Same as setting `--authorization-webhook-cache-unauthorized-ttl` flag + // Default: 30s + UnauthorizedTTL metav1.Duration `json:"unauthorizedTTL"` + // Timeout for the webhook request + // Maximum allowed value is 30s. + // Required, no default value. + Timeout metav1.Duration `json:"timeout"` + // The API version of the authorization.k8s.io SubjectAccessReview to + // send to and expect from the webhook. + // Same as setting `--authorization-webhook-version` flag + // Valid values: v1beta1, v1 + // Required, no default value + SubjectAccessReviewVersion string `json:"subjectAccessReviewVersion"` + // MatchConditionSubjectAccessReviewVersion specifies the SubjectAccessReview + // version the CEL expressions are evaluated against + // Valid values: v1 + // Required, no default value + MatchConditionSubjectAccessReviewVersion string `json:"matchConditionSubjectAccessReviewVersion"` + // Controls the authorization decision when a webhook request fails to + // complete or returns a malformed response or errors evaluating + // matchConditions. + // Valid values: + // - NoOpinion: continue to subsequent authorizers to see if one of + // them allows the request + // - Deny: reject the request without consulting subsequent authorizers + // Required, with no default. + FailurePolicy string `json:"failurePolicy"` + + // ConnectionInfo defines how we talk to the webhook + ConnectionInfo WebhookConnectionInfo `json:"connectionInfo"` + + // matchConditions is a list of conditions that must be met for a request to be sent to this + // webhook. An empty list of matchConditions matches all requests. 
+ // There are a maximum of 64 match conditions allowed. + // + // The exact matching logic is (in order): + // 1. If at least one matchCondition evaluates to FALSE, then the webhook is skipped. + // 2. If ALL matchConditions evaluate to TRUE, then the webhook is called. + // 3. If at least one matchCondition evaluates to an error (but none are FALSE): + // - If failurePolicy=Deny, then the webhook rejects the request + // - If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + MatchConditions []WebhookMatchCondition `json:"matchConditions"` +} + +type WebhookConnectionInfo struct { + // Controls how the webhook should communicate with the server. + // Valid values: + // - KubeConfigFile: use the file specified in kubeConfigFile to locate the + // server. + // - InClusterConfig: use the in-cluster configuration to call the + // SubjectAccessReview API hosted by kube-apiserver. This mode is not + // allowed for kube-apiserver. + Type string `json:"type"` + + // Path to KubeConfigFile for connection info + // Required, if connectionInfo.Type is KubeConfig + KubeConfigFile *string `json:"kubeConfigFile"` +} + +type WebhookMatchCondition struct { + // expression represents the expression which will be evaluated by CEL. Must evaluate to bool. + // CEL expressions have access to the contents of the SubjectAccessReview in v1 version. + // If version specified by subjectAccessReviewVersion in the request variable is v1beta1, + // the contents would be converted to the v1 version before evaluating the CEL expression. + // + // Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ + Expression string `json:"expression"` +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go new file mode 100644 index 00000000..30ef049d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.conversion.go @@ -0,0 +1,900 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiserver "k8s.io/apiserver/pkg/apis/apiserver" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
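As a sketch of how the webhook authorizer fields above compose, one plausible single-webhook configuration; the name, kubeconfig path, and match condition are hypothetical:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiserverv1beta1 "k8s.io/apiserver/pkg/apis/apiserver/v1beta1"
)

func exampleAuthorizationConfiguration() apiserverv1beta1.AuthorizationConfiguration {
	kubeconfig := "/etc/kubernetes/authz-webhook.kubeconfig" // hypothetical path
	return apiserverv1beta1.AuthorizationConfiguration{
		Authorizers: []apiserverv1beta1.AuthorizerConfiguration{{
			Type: string(apiserverv1beta1.TypeWebhook),
			Name: "webhook.example.com", // must be a DNS1123 label or subdomain
			Webhook: &apiserverv1beta1.WebhookConfiguration{
				AuthorizedTTL:                            metav1.Duration{Duration: 5 * time.Minute},
				UnauthorizedTTL:                          metav1.Duration{Duration: 30 * time.Second},
				Timeout:                                  metav1.Duration{Duration: 3 * time.Second},
				SubjectAccessReviewVersion:               "v1",
				MatchConditionSubjectAccessReviewVersion: "v1",
				FailurePolicy:                            apiserverv1beta1.FailurePolicyNoOpinion,
				ConnectionInfo: apiserverv1beta1.WebhookConnectionInfo{
					Type:           apiserverv1beta1.AuthorizationWebhookConnectionInfoTypeKubeConfigFile,
					KubeConfigFile: &kubeconfig,
				},
				// only consult the webhook for resource requests (hypothetical policy)
				MatchConditions: []apiserverv1beta1.WebhookMatchCondition{{
					Expression: "has(request.resourceAttributes)",
				}},
			},
		}},
	}
}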
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*AnonymousAuthCondition)(nil), (*apiserver.AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(a.(*AnonymousAuthCondition), b.(*apiserver.AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthCondition)(nil), (*AnonymousAuthCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(a.(*apiserver.AnonymousAuthCondition), b.(*AnonymousAuthCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AnonymousAuthConfig)(nil), (*apiserver.AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(a.(*AnonymousAuthConfig), b.(*apiserver.AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AnonymousAuthConfig)(nil), (*AnonymousAuthConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(a.(*apiserver.AnonymousAuthConfig), b.(*AnonymousAuthConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthenticationConfiguration)(nil), (*apiserver.AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(a.(*AuthenticationConfiguration), b.(*apiserver.AuthenticationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthenticationConfiguration)(nil), (*AuthenticationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(a.(*apiserver.AuthenticationConfiguration), b.(*AuthenticationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthorizationConfiguration)(nil), (*apiserver.AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(a.(*AuthorizationConfiguration), b.(*apiserver.AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizationConfiguration)(nil), (*AuthorizationConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(a.(*apiserver.AuthorizationConfiguration), b.(*AuthorizationConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*AuthorizerConfiguration)(nil), (*apiserver.AuthorizerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(a.(*AuthorizerConfiguration), b.(*apiserver.AuthorizerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.AuthorizerConfiguration)(nil), (*AuthorizerConfiguration)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(a.(*apiserver.AuthorizerConfiguration), b.(*AuthorizerConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClaimMappings)(nil), (*apiserver.ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(a.(*ClaimMappings), b.(*apiserver.ClaimMappings), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ClaimMappings)(nil), (*ClaimMappings)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(a.(*apiserver.ClaimMappings), b.(*ClaimMappings), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClaimOrExpression)(nil), (*apiserver.ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(a.(*ClaimOrExpression), b.(*apiserver.ClaimOrExpression), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ClaimOrExpression)(nil), (*ClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(a.(*apiserver.ClaimOrExpression), b.(*ClaimOrExpression), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ClaimValidationRule)(nil), (*apiserver.ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(a.(*ClaimValidationRule), b.(*apiserver.ClaimValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ClaimValidationRule)(nil), (*ClaimValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(a.(*apiserver.ClaimValidationRule), b.(*ClaimValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Connection)(nil), (*apiserver.Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Connection_To_apiserver_Connection(a.(*Connection), b.(*apiserver.Connection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Connection)(nil), (*Connection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Connection_To_v1beta1_Connection(a.(*apiserver.Connection), b.(*Connection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.EgressSelection)(nil), (*EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(a.(*apiserver.EgressSelection), b.(*EgressSelection), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EgressSelectorConfiguration)(nil), (*apiserver.EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(a.(*EgressSelectorConfiguration), b.(*apiserver.EgressSelectorConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*apiserver.EgressSelectorConfiguration)(nil), (*EgressSelectorConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(a.(*apiserver.EgressSelectorConfiguration), b.(*EgressSelectorConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExtraMapping)(nil), (*apiserver.ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(a.(*ExtraMapping), b.(*apiserver.ExtraMapping), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.ExtraMapping)(nil), (*ExtraMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(a.(*apiserver.ExtraMapping), b.(*ExtraMapping), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Issuer)(nil), (*apiserver.Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Issuer_To_apiserver_Issuer(a.(*Issuer), b.(*apiserver.Issuer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Issuer)(nil), (*Issuer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Issuer_To_v1beta1_Issuer(a.(*apiserver.Issuer), b.(*Issuer), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*JWTAuthenticator)(nil), (*apiserver.JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(a.(*JWTAuthenticator), b.(*apiserver.JWTAuthenticator), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.JWTAuthenticator)(nil), (*JWTAuthenticator)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(a.(*apiserver.JWTAuthenticator), b.(*JWTAuthenticator), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PrefixedClaimOrExpression)(nil), (*apiserver.PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(a.(*PrefixedClaimOrExpression), b.(*apiserver.PrefixedClaimOrExpression), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.PrefixedClaimOrExpression)(nil), (*PrefixedClaimOrExpression)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(a.(*apiserver.PrefixedClaimOrExpression), b.(*PrefixedClaimOrExpression), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TCPTransport)(nil), (*apiserver.TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport(a.(*TCPTransport), b.(*apiserver.TCPTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.TCPTransport)(nil), (*TCPTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport(a.(*apiserver.TCPTransport), b.(*TCPTransport), scope) + }); err != nil { + 
return err + } + if err := s.AddGeneratedConversionFunc((*TLSConfig)(nil), (*apiserver.TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig(a.(*TLSConfig), b.(*apiserver.TLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.TLSConfig)(nil), (*TLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig(a.(*apiserver.TLSConfig), b.(*TLSConfig), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TracingConfiguration)(nil), (*apiserver.TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(a.(*TracingConfiguration), b.(*apiserver.TracingConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.TracingConfiguration)(nil), (*TracingConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(a.(*apiserver.TracingConfiguration), b.(*TracingConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Transport)(nil), (*apiserver.Transport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Transport_To_apiserver_Transport(a.(*Transport), b.(*apiserver.Transport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.Transport)(nil), (*Transport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_Transport_To_v1beta1_Transport(a.(*apiserver.Transport), b.(*Transport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*UDSTransport)(nil), (*apiserver.UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport(a.(*UDSTransport), b.(*apiserver.UDSTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.UDSTransport)(nil), (*UDSTransport)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport(a.(*apiserver.UDSTransport), b.(*UDSTransport), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*UserValidationRule)(nil), (*apiserver.UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(a.(*UserValidationRule), b.(*apiserver.UserValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.UserValidationRule)(nil), (*UserValidationRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(a.(*apiserver.UserValidationRule), b.(*UserValidationRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookConfiguration)(nil), (*apiserver.WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(a.(*WebhookConfiguration), b.(*apiserver.WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*apiserver.WebhookConfiguration)(nil), (*WebhookConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(a.(*apiserver.WebhookConfiguration), b.(*WebhookConfiguration), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookConnectionInfo)(nil), (*apiserver.WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(a.(*WebhookConnectionInfo), b.(*apiserver.WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookConnectionInfo)(nil), (*WebhookConnectionInfo)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(a.(*apiserver.WebhookConnectionInfo), b.(*WebhookConnectionInfo), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*WebhookMatchCondition)(nil), (*apiserver.WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(a.(*WebhookMatchCondition), b.(*apiserver.WebhookMatchCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiserver.WebhookMatchCondition)(nil), (*WebhookMatchCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(a.(*apiserver.WebhookMatchCondition), b.(*WebhookMatchCondition), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*EgressSelection)(nil), (*apiserver.EgressSelection)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(a.(*EgressSelection), b.(*apiserver.EgressSelection), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition is an autogenerated conversion function. +func Convert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in *AnonymousAuthCondition, out *apiserver.AnonymousAuthCondition, s conversion.Scope) error { + return autoConvert_v1beta1_AnonymousAuthCondition_To_apiserver_AnonymousAuthCondition(in, out, s) +} + +func autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition is an autogenerated conversion function. 
+func Convert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in *apiserver.AnonymousAuthCondition, out *AnonymousAuthCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthCondition_To_v1beta1_AnonymousAuthCondition(in, out, s)
+}
+
+func autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]apiserver.AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in *AnonymousAuthConfig, out *apiserver.AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_v1beta1_AnonymousAuthConfig_To_apiserver_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	out.Enabled = in.Enabled
+	out.Conditions = *(*[]AnonymousAuthCondition)(unsafe.Pointer(&in.Conditions))
+	return nil
+}
+
+// Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig is an autogenerated conversion function.
+func Convert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in *apiserver.AnonymousAuthConfig, out *AnonymousAuthConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_AnonymousAuthConfig_To_v1beta1_AnonymousAuthConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]apiserver.JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*apiserver.AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in *AuthenticationConfiguration, out *apiserver.AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_AuthenticationConfiguration_To_apiserver_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.JWT = nil
+	}
+	out.Anonymous = (*AnonymousAuthConfig)(unsafe.Pointer(in.Anonymous))
+	return nil
+}
+
+// Convert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(in *apiserver.AuthenticationConfiguration, out *AuthenticationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthenticationConfiguration_To_v1beta1_AuthenticationConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]apiserver.AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in *AuthorizationConfiguration, out *apiserver.AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_AuthorizationConfiguration_To_apiserver_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	out.Authorizers = *(*[]AuthorizerConfiguration)(unsafe.Pointer(&in.Authorizers))
+	return nil
+}
+
+// Convert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(in *apiserver.AuthorizationConfiguration, out *AuthorizationConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizationConfiguration_To_v1beta1_AuthorizationConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = apiserver.AuthorizerType(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*apiserver.WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in *AuthorizerConfiguration, out *apiserver.AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_AuthorizerConfiguration_To_apiserver_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	out.Type = string(in.Type)
+	out.Name = in.Name
+	out.Webhook = (*WebhookConfiguration)(unsafe.Pointer(in.Webhook))
+	return nil
+}
+
+// Convert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration is an autogenerated conversion function.
+func Convert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(in *apiserver.AuthorizerConfiguration, out *AuthorizerConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_AuthorizerConfiguration_To_v1beta1_AuthorizerConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	if err := Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]apiserver.ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings is an autogenerated conversion function.
+func Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(in *ClaimMappings, out *apiserver.ClaimMappings, s conversion.Scope) error {
+	return autoConvert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(&in.Username, &out.Username, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(&in.Groups, &out.Groups, s); err != nil {
+		return err
+	}
+	if err := Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(&in.UID, &out.UID, s); err != nil {
+		return err
+	}
+	out.Extra = *(*[]ExtraMapping)(unsafe.Pointer(&in.Extra))
+	return nil
+}
+
+// Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings is an autogenerated conversion function.
+func Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(in *apiserver.ClaimMappings, out *ClaimMappings, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(in, out, s)
+}
+
+func autoConvert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression is an autogenerated conversion function.
+func Convert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in *ClaimOrExpression, out *apiserver.ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1beta1_ClaimOrExpression_To_apiserver_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(in *apiserver.ClaimOrExpression, out *ClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimOrExpression_To_v1beta1_ClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule is an autogenerated conversion function.
+func Convert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in *ClaimValidationRule, out *apiserver.ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_v1beta1_ClaimValidationRule_To_apiserver_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.RequiredValue = in.RequiredValue
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule is an autogenerated conversion function.
+func Convert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(in *apiserver.ClaimValidationRule, out *ClaimValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_ClaimValidationRule_To_v1beta1_ClaimValidationRule(in, out, s)
+}
+
+func autoConvert_v1beta1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	out.ProxyProtocol = apiserver.ProtocolType(in.ProxyProtocol)
+	out.Transport = (*apiserver.Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_v1beta1_Connection_To_apiserver_Connection is an autogenerated conversion function.
+func Convert_v1beta1_Connection_To_apiserver_Connection(in *Connection, out *apiserver.Connection, s conversion.Scope) error {
+	return autoConvert_v1beta1_Connection_To_apiserver_Connection(in, out, s)
+}
+
+func autoConvert_apiserver_Connection_To_v1beta1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	out.ProxyProtocol = ProtocolType(in.ProxyProtocol)
+	out.Transport = (*Transport)(unsafe.Pointer(in.Transport))
+	return nil
+}
+
+// Convert_apiserver_Connection_To_v1beta1_Connection is an autogenerated conversion function.
+func Convert_apiserver_Connection_To_v1beta1_Connection(in *apiserver.Connection, out *Connection, s conversion.Scope) error {
+	return autoConvert_apiserver_Connection_To_v1beta1_Connection(in, out, s)
+}
+
+func autoConvert_v1beta1_EgressSelection_To_apiserver_EgressSelection(in *EgressSelection, out *apiserver.EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_v1beta1_Connection_To_apiserver_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+func autoConvert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	out.Name = in.Name
+	if err := Convert_apiserver_Connection_To_v1beta1_Connection(&in.Connection, &out.Connection, s); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection is an autogenerated conversion function.
+func Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in *apiserver.EgressSelection, out *EgressSelection, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelection_To_v1beta1_EgressSelection(in, out, s)
+}
+
+func autoConvert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]apiserver.EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_v1beta1_EgressSelection_To_apiserver_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in *EgressSelectorConfiguration, out *apiserver.EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_EgressSelectorConfiguration_To_apiserver_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			if err := Convert_apiserver_EgressSelection_To_v1beta1_EgressSelection(&(*in)[i], &(*out)[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.EgressSelections = nil
+	}
+	return nil
+}
+
+// Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration is an autogenerated conversion function.
+func Convert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in *apiserver.EgressSelectorConfiguration, out *EgressSelectorConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_EgressSelectorConfiguration_To_v1beta1_EgressSelectorConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping is an autogenerated conversion function.
+func Convert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(in *ExtraMapping, out *apiserver.ExtraMapping, s conversion.Scope) error {
+	return autoConvert_v1beta1_ExtraMapping_To_apiserver_ExtraMapping(in, out, s)
+}
+
+func autoConvert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	out.Key = in.Key
+	out.ValueExpression = in.ValueExpression
+	return nil
+}
+
+// Convert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping is an autogenerated conversion function.
+func Convert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(in *apiserver.ExtraMapping, out *ExtraMapping, s conversion.Scope) error {
+	return autoConvert_apiserver_ExtraMapping_To_v1beta1_ExtraMapping(in, out, s)
+}
+
+func autoConvert_v1beta1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_Pointer_string_To_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = apiserver.AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_v1beta1_Issuer_To_apiserver_Issuer is an autogenerated conversion function.
+func Convert_v1beta1_Issuer_To_apiserver_Issuer(in *Issuer, out *apiserver.Issuer, s conversion.Scope) error {
+	return autoConvert_v1beta1_Issuer_To_apiserver_Issuer(in, out, s)
+}
+
+func autoConvert_apiserver_Issuer_To_v1beta1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	out.URL = in.URL
+	if err := v1.Convert_string_To_Pointer_string(&in.DiscoveryURL, &out.DiscoveryURL, s); err != nil {
+		return err
+	}
+	out.CertificateAuthority = in.CertificateAuthority
+	out.Audiences = *(*[]string)(unsafe.Pointer(&in.Audiences))
+	out.AudienceMatchPolicy = AudienceMatchPolicyType(in.AudienceMatchPolicy)
+	return nil
+}
+
+// Convert_apiserver_Issuer_To_v1beta1_Issuer is an autogenerated conversion function.
+func Convert_apiserver_Issuer_To_v1beta1_Issuer(in *apiserver.Issuer, out *Issuer, s conversion.Scope) error {
+	return autoConvert_apiserver_Issuer_To_v1beta1_Issuer(in, out, s)
+}
+
+func autoConvert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_v1beta1_Issuer_To_apiserver_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]apiserver.ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_v1beta1_ClaimMappings_To_apiserver_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]apiserver.UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator is an autogenerated conversion function.
+func Convert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in *JWTAuthenticator, out *apiserver.JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_v1beta1_JWTAuthenticator_To_apiserver_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	if err := Convert_apiserver_Issuer_To_v1beta1_Issuer(&in.Issuer, &out.Issuer, s); err != nil {
+		return err
+	}
+	out.ClaimValidationRules = *(*[]ClaimValidationRule)(unsafe.Pointer(&in.ClaimValidationRules))
+	if err := Convert_apiserver_ClaimMappings_To_v1beta1_ClaimMappings(&in.ClaimMappings, &out.ClaimMappings, s); err != nil {
+		return err
+	}
+	out.UserValidationRules = *(*[]UserValidationRule)(unsafe.Pointer(&in.UserValidationRules))
+	return nil
+}
+
+// Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator is an autogenerated conversion function.
+func Convert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(in *apiserver.JWTAuthenticator, out *JWTAuthenticator, s conversion.Scope) error {
+	return autoConvert_apiserver_JWTAuthenticator_To_v1beta1_JWTAuthenticator(in, out, s)
+}
+
+func autoConvert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in *PrefixedClaimOrExpression, out *apiserver.PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_v1beta1_PrefixedClaimOrExpression_To_apiserver_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	out.Claim = in.Claim
+	out.Prefix = (*string)(unsafe.Pointer(in.Prefix))
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression is an autogenerated conversion function.
+func Convert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(in *apiserver.PrefixedClaimOrExpression, out *PrefixedClaimOrExpression, s conversion.Scope) error {
+	return autoConvert_apiserver_PrefixedClaimOrExpression_To_v1beta1_PrefixedClaimOrExpression(in, out, s)
+}
+
+func autoConvert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*apiserver.TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport is an autogenerated conversion function.
+func Convert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in *TCPTransport, out *apiserver.TCPTransport, s conversion.Scope) error {
+	return autoConvert_v1beta1_TCPTransport_To_apiserver_TCPTransport(in, out, s)
+}
+
+func autoConvert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	out.URL = in.URL
+	out.TLSConfig = (*TLSConfig)(unsafe.Pointer(in.TLSConfig))
+	return nil
+}
+
+// Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport is an autogenerated conversion function.
+func Convert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in *apiserver.TCPTransport, out *TCPTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_TCPTransport_To_v1beta1_TCPTransport(in, out, s)
+}
+
+func autoConvert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig is an autogenerated conversion function.
+func Convert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in *TLSConfig, out *apiserver.TLSConfig, s conversion.Scope) error {
+	return autoConvert_v1beta1_TLSConfig_To_apiserver_TLSConfig(in, out, s)
+}
+
+func autoConvert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	out.CABundle = in.CABundle
+	out.ClientKey = in.ClientKey
+	out.ClientCert = in.ClientCert
+	return nil
+}
+
+// Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig is an autogenerated conversion function.
+func Convert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in *apiserver.TLSConfig, out *TLSConfig, s conversion.Scope) error {
+	return autoConvert_apiserver_TLSConfig_To_v1beta1_TLSConfig(in, out, s)
+}
+
+func autoConvert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(in *TracingConfiguration, out *apiserver.TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_TracingConfiguration_To_apiserver_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	out.TracingConfiguration = in.TracingConfiguration
+	return nil
+}
+
+// Convert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration is an autogenerated conversion function.
+func Convert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(in *apiserver.TracingConfiguration, out *TracingConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_TracingConfiguration_To_v1beta1_TracingConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	out.TCP = (*apiserver.TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*apiserver.UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_v1beta1_Transport_To_apiserver_Transport is an autogenerated conversion function.
+func Convert_v1beta1_Transport_To_apiserver_Transport(in *Transport, out *apiserver.Transport, s conversion.Scope) error {
+	return autoConvert_v1beta1_Transport_To_apiserver_Transport(in, out, s)
+}
+
+func autoConvert_apiserver_Transport_To_v1beta1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	out.TCP = (*TCPTransport)(unsafe.Pointer(in.TCP))
+	out.UDS = (*UDSTransport)(unsafe.Pointer(in.UDS))
+	return nil
+}
+
+// Convert_apiserver_Transport_To_v1beta1_Transport is an autogenerated conversion function.
+func Convert_apiserver_Transport_To_v1beta1_Transport(in *apiserver.Transport, out *Transport, s conversion.Scope) error {
+	return autoConvert_apiserver_Transport_To_v1beta1_Transport(in, out, s)
+}
+
+func autoConvert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport is an autogenerated conversion function.
+func Convert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in *UDSTransport, out *apiserver.UDSTransport, s conversion.Scope) error {
+	return autoConvert_v1beta1_UDSTransport_To_apiserver_UDSTransport(in, out, s)
+}
+
+func autoConvert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	out.UDSName = in.UDSName
+	return nil
+}
+
+// Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport is an autogenerated conversion function.
+func Convert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in *apiserver.UDSTransport, out *UDSTransport, s conversion.Scope) error {
+	return autoConvert_apiserver_UDSTransport_To_v1beta1_UDSTransport(in, out, s)
+}
+
+func autoConvert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule is an autogenerated conversion function.
+func Convert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(in *UserValidationRule, out *apiserver.UserValidationRule, s conversion.Scope) error {
+	return autoConvert_v1beta1_UserValidationRule_To_apiserver_UserValidationRule(in, out, s)
+}
+
+func autoConvert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	out.Expression = in.Expression
+	out.Message = in.Message
+	return nil
+}
+
+// Convert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule is an autogenerated conversion function.
+func Convert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(in *apiserver.UserValidationRule, out *UserValidationRule, s conversion.Scope) error {
+	return autoConvert_apiserver_UserValidationRule_To_v1beta1_UserValidationRule(in, out, s)
+}
+
+func autoConvert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]apiserver.WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration is an autogenerated conversion function.
+func Convert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in *WebhookConfiguration, out *apiserver.WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_v1beta1_WebhookConfiguration_To_apiserver_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	out.SubjectAccessReviewVersion = in.SubjectAccessReviewVersion
+	out.MatchConditionSubjectAccessReviewVersion = in.MatchConditionSubjectAccessReviewVersion
+	out.FailurePolicy = in.FailurePolicy
+	if err := Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(&in.ConnectionInfo, &out.ConnectionInfo, s); err != nil {
+		return err
+	}
+	out.MatchConditions = *(*[]WebhookMatchCondition)(unsafe.Pointer(&in.MatchConditions))
+	return nil
+}
+
+// Convert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration is an autogenerated conversion function.
+func Convert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(in *apiserver.WebhookConfiguration, out *WebhookConfiguration, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConfiguration_To_v1beta1_WebhookConfiguration(in, out, s)
+}
+
+func autoConvert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in *WebhookConnectionInfo, out *apiserver.WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_v1beta1_WebhookConnectionInfo_To_apiserver_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	out.Type = in.Type
+	out.KubeConfigFile = (*string)(unsafe.Pointer(in.KubeConfigFile))
+	return nil
+}
+
+// Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo is an autogenerated conversion function.
+func Convert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(in *apiserver.WebhookConnectionInfo, out *WebhookConnectionInfo, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookConnectionInfo_To_v1beta1_WebhookConnectionInfo(in, out, s)
+}
+
+func autoConvert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in *WebhookMatchCondition, out *apiserver.WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_v1beta1_WebhookMatchCondition_To_apiserver_WebhookMatchCondition(in, out, s)
+}
+
+func autoConvert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	out.Expression = in.Expression
+	return nil
+}
+
+// Convert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition is an autogenerated conversion function.
+func Convert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(in *apiserver.WebhookMatchCondition, out *WebhookMatchCondition, s conversion.Scope) error {
+	return autoConvert_apiserver_WebhookMatchCondition_To_v1beta1_WebhookMatchCondition(in, out, s)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..0d78e51a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,553 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition.
+func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]AnonymousAuthCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig.
+func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AnonymousAuthConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.JWT != nil {
+		in, out := &in.JWT, &out.JWT
+		*out = make([]JWTAuthenticator, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Anonymous != nil {
+		in, out := &in.Anonymous, &out.Anonymous
+		*out = new(AnonymousAuthConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration.
+func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthenticationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Authorizers != nil {
+		in, out := &in.Authorizers, &out.Authorizers
+		*out = make([]AuthorizerConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration.
+func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizationConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) {
+	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration.
+func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(AuthorizerConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) {
+	*out = *in
+	in.Username.DeepCopyInto(&out.Username)
+	in.Groups.DeepCopyInto(&out.Groups)
+	out.UID = in.UID
+	if in.Extra != nil {
+		in, out := &in.Extra, &out.Extra
+		*out = make([]ExtraMapping, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings.
+func (in *ClaimMappings) DeepCopy() *ClaimMappings {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimMappings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression.
+func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule.
+func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(ClaimValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Connection) DeepCopyInto(out *Connection) {
+	*out = *in
+	if in.Transport != nil {
+		in, out := &in.Transport, &out.Transport
+		*out = new(Transport)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection.
+func (in *Connection) DeepCopy() *Connection {
+	if in == nil {
+		return nil
+	}
+	out := new(Connection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelection) DeepCopyInto(out *EgressSelection) {
+	*out = *in
+	in.Connection.DeepCopyInto(&out.Connection)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection.
+func (in *EgressSelection) DeepCopy() *EgressSelection {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelection)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.EgressSelections != nil {
+		in, out := &in.EgressSelections, &out.EgressSelections
+		*out = make([]EgressSelection, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration.
+func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressSelectorConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping.
+func (in *ExtraMapping) DeepCopy() *ExtraMapping {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraMapping)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Issuer) DeepCopyInto(out *Issuer) {
+	*out = *in
+	if in.DiscoveryURL != nil {
+		in, out := &in.DiscoveryURL, &out.DiscoveryURL
+		*out = new(string)
+		**out = **in
+	}
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer.
+func (in *Issuer) DeepCopy() *Issuer {
+	if in == nil {
+		return nil
+	}
+	out := new(Issuer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) {
+	*out = *in
+	in.Issuer.DeepCopyInto(&out.Issuer)
+	if in.ClaimValidationRules != nil {
+		in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+		*out = make([]ClaimValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
+	if in.UserValidationRules != nil {
+		in, out := &in.UserValidationRules, &out.UserValidationRules
+		*out = make([]UserValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator.
+func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(JWTAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) {
+	*out = *in
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression.
+func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(PrefixedClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.TracingConfiguration.DeepCopyInto(&out.TracingConfiguration)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TracingConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule.
+func (in *UserValidationRule) DeepCopy() *UserValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) {
+	*out = *in
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo)
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]WebhookMatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration.
+func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) {
+	*out = *in
+	if in.KubeConfigFile != nil {
+		in, out := &in.KubeConfigFile, &out.KubeConfigFile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo.
+func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition.
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookMatchCondition)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go
new file mode 100644
index 00000000..fdbb606a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/v1beta1/zz_generated.defaults.go
@@ -0,0 +1,43 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulters functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&AuthorizationConfiguration{}, func(obj interface{}) { SetObjectDefaults_AuthorizationConfiguration(obj.(*AuthorizationConfiguration)) })
+	return nil
+}
+
+func SetObjectDefaults_AuthorizationConfiguration(in *AuthorizationConfiguration) {
+	for i := range in.Authorizers {
+		a := &in.Authorizers[i]
+		if a.Webhook != nil {
+			SetDefaults_WebhookConfiguration(a.Webhook)
+		}
+	}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go
new file mode 100644
index 00000000..5e80e2dc
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation.go
@@ -0,0 +1,795 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	celgo "github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/operators"
+	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+
+	v1 "k8s.io/api/authorization/v1"
+	"k8s.io/api/authorization/v1beta1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	utilvalidation "k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	api "k8s.io/apiserver/pkg/apis/apiserver"
+	authenticationcel "k8s.io/apiserver/pkg/authentication/cel"
+	authorizationcel "k8s.io/apiserver/pkg/authorization/cel"
+	"k8s.io/apiserver/pkg/cel"
+	"k8s.io/apiserver/pkg/cel/environment"
+	"k8s.io/apiserver/pkg/features"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/client-go/util/cert"
+)
+
+// ValidateAuthenticationConfiguration validates a given AuthenticationConfiguration.
+func ValidateAuthenticationConfiguration(c *api.AuthenticationConfiguration, disallowedIssuers []string) field.ErrorList {
+	root := field.NewPath("jwt")
+	var allErrs field.ErrorList
+
+	// We allow 0 authenticators in the authentication configuration.
+	// This allows us to support scenarios where the API server is initially set up without
+	// any authenticators and then authenticators are added later via dynamic config.
+
+	if len(c.JWT) > 64 {
+		allErrs = append(allErrs, field.TooMany(root, len(c.JWT), 64))
+		return allErrs
+	}
+
+	seenIssuers := sets.New[string]()
+	seenDiscoveryURLs := sets.New[string]()
+	for i, a := range c.JWT {
+		fldPath := root.Index(i)
+		_, errs := validateJWTAuthenticator(a, fldPath, sets.New(disallowedIssuers...), utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration))
+		allErrs = append(allErrs, errs...)
+
+		if seenIssuers.Has(a.Issuer.URL) {
+			allErrs = append(allErrs, field.Duplicate(fldPath.Child("issuer").Child("url"), a.Issuer.URL))
+		}
+		seenIssuers.Insert(a.Issuer.URL)
+
+		if len(a.Issuer.DiscoveryURL) > 0 {
+			if seenDiscoveryURLs.Has(a.Issuer.DiscoveryURL) {
+				allErrs = append(allErrs, field.Duplicate(fldPath.Child("issuer").Child("discoveryURL"), a.Issuer.DiscoveryURL))
+			}
+			seenDiscoveryURLs.Insert(a.Issuer.DiscoveryURL)
+		}
+	}
+
+	if c.Anonymous != nil {
+		if !utilfeature.DefaultFeatureGate.Enabled(features.AnonymousAuthConfigurableEndpoints) {
+			allErrs = append(allErrs, field.Forbidden(field.NewPath("anonymous"), "anonymous is not supported when AnonymousAuthConfigurableEnpoints feature gate is disabled"))
+		}
+		if !c.Anonymous.Enabled && len(c.Anonymous.Conditions) > 0 {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("anonymous", "conditions"), c.Anonymous.Conditions, "enabled should be set to true when conditions are defined"))
+		}
+	}
+
+	return allErrs
+}
+
+// CompileAndValidateJWTAuthenticator validates a given JWTAuthenticator and returns a CELMapper with the compiled
+// CEL expressions for claim mappings and validation rules.
+// This is exported for use in oidc package.
+func CompileAndValidateJWTAuthenticator(authenticator api.JWTAuthenticator, disallowedIssuers []string) (authenticationcel.CELMapper, field.ErrorList) {
+	return validateJWTAuthenticator(authenticator, nil, sets.New(disallowedIssuers...), utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthenticationConfiguration))
+}
+
+func validateJWTAuthenticator(authenticator api.JWTAuthenticator, fldPath *field.Path, disallowedIssuers sets.Set[string], structuredAuthnFeatureEnabled bool) (authenticationcel.CELMapper, field.ErrorList) {
+	var allErrs field.ErrorList
+
+	// strictCost is set to true which enables the strict cost for CEL validation.
+	compiler := authenticationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true))
+	state := &validationState{}
+
+	allErrs = append(allErrs, validateIssuer(authenticator.Issuer, disallowedIssuers, fldPath.Child("issuer"))...)
+	allErrs = append(allErrs, validateClaimValidationRules(compiler, state, authenticator.ClaimValidationRules, fldPath.Child("claimValidationRules"), structuredAuthnFeatureEnabled)...)
+	allErrs = append(allErrs, validateClaimMappings(compiler, state, authenticator.ClaimMappings, fldPath.Child("claimMappings"), structuredAuthnFeatureEnabled)...)
+	allErrs = append(allErrs, validateUserValidationRules(compiler, state, authenticator.UserValidationRules, fldPath.Child("userValidationRules"), structuredAuthnFeatureEnabled)...)
+
+	return state.mapper, allErrs
+}
+
+type validationState struct {
+	mapper                 authenticationcel.CELMapper
+	usesEmailClaim         bool
+	usesEmailVerifiedClaim bool
+}
+
+func validateIssuer(issuer api.Issuer, disallowedIssuers sets.Set[string], fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+
+	allErrs = append(allErrs, validateIssuerURL(issuer.URL, disallowedIssuers, fldPath.Child("url"))...)
+	allErrs = append(allErrs, validateIssuerDiscoveryURL(issuer.URL, issuer.DiscoveryURL, fldPath.Child("discoveryURL"))...)
+	allErrs = append(allErrs, validateAudiences(issuer.Audiences, issuer.AudienceMatchPolicy, fldPath.Child("audiences"), fldPath.Child("audienceMatchPolicy"))...)
+	allErrs = append(allErrs, validateCertificateAuthority(issuer.CertificateAuthority, fldPath.Child("certificateAuthority"))...)
+
+	return allErrs
+}
+
+func validateIssuerURL(issuerURL string, disallowedIssuers sets.Set[string], fldPath *field.Path) field.ErrorList {
+	if len(issuerURL) == 0 {
+		return field.ErrorList{field.Required(fldPath, "URL is required")}
+	}
+
+	return validateURL(issuerURL, disallowedIssuers, fldPath)
+}
+
+func validateIssuerDiscoveryURL(issuerURL, issuerDiscoveryURL string, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+
+	if len(issuerDiscoveryURL) == 0 {
+		return nil
+	}
+
+	if len(issuerURL) > 0 && strings.TrimRight(issuerURL, "/") == strings.TrimRight(issuerDiscoveryURL, "/") {
+		allErrs = append(allErrs, field.Invalid(fldPath, issuerDiscoveryURL, "discoveryURL must be different from URL"))
+	}
+
+	// issuerDiscoveryURL is not an issuer URL and does not need to validated against any set of disallowed issuers
+	allErrs = append(allErrs, validateURL(issuerDiscoveryURL, nil, fldPath)...)
+ return allErrs +} + +func validateURL(issuerURL string, disallowedIssuers sets.Set[string], fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if disallowedIssuers.Has(issuerURL) { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, fmt.Sprintf("URL must not overlap with disallowed issuers: %s", sets.List(disallowedIssuers)))) + } + + u, err := url.Parse(issuerURL) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, err.Error())) + return allErrs + } + if u.Scheme != "https" { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL scheme must be https")) + } + if u.User != nil { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a username or password")) + } + if len(u.RawQuery) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a query")) + } + if len(u.Fragment) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath, issuerURL, "URL must not contain a fragment")) + } + + return allErrs +} + +func validateAudiences(audiences []string, audienceMatchPolicy api.AudienceMatchPolicyType, fldPath, audienceMatchPolicyFldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if len(audiences) == 0 { + allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, fldPath))) + return allErrs + } + + seenAudiences := sets.NewString() + for i, audience := range audiences { + fldPath := fldPath.Index(i) + if len(audience) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "audience can't be empty")) + } + if seenAudiences.Has(audience) { + allErrs = append(allErrs, field.Duplicate(fldPath, audience)) + } + seenAudiences.Insert(audience) + } + + if len(audiences) > 1 && audienceMatchPolicy != api.AudienceMatchPolicyMatchAny { + allErrs = append(allErrs, field.Invalid(audienceMatchPolicyFldPath, audienceMatchPolicy, "audienceMatchPolicy must be MatchAny for multiple audiences")) + } + if len(audiences) == 1 && (len(audienceMatchPolicy) > 0 && audienceMatchPolicy != api.AudienceMatchPolicyMatchAny) { + allErrs = append(allErrs, field.Invalid(audienceMatchPolicyFldPath, audienceMatchPolicy, "audienceMatchPolicy must be empty or MatchAny for single audience")) + } + + return allErrs +} + +func validateCertificateAuthority(certificateAuthority string, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + if len(certificateAuthority) == 0 { + return allErrs + } + _, err := cert.NewPoolFromBytes([]byte(certificateAuthority)) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, "", err.Error())) + } + + return allErrs +} + +func validateClaimValidationRules(compiler authenticationcel.Compiler, state *validationState, rules []api.ClaimValidationRule, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { + var allErrs field.ErrorList + + seenClaims := sets.NewString() + seenExpressions := sets.NewString() + var compilationResults []authenticationcel.CompilationResult + + for i, rule := range rules { + fldPath := fldPath.Index(i) + + if len(rule.Expression) > 0 && !structuredAuthnFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath.Child("expression"), rule.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + + switch { + case len(rule.Claim) > 0 && len(rule.Expression) > 0: + allErrs = append(allErrs, field.Invalid(fldPath, rule.Claim, "claim and expression can't both be set")) + case 
len(rule.Claim) == 0 && len(rule.Expression) == 0: + allErrs = append(allErrs, field.Required(fldPath, "claim or expression is required")) + case len(rule.Claim) > 0: + if len(rule.Message) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("message"), rule.Message, "message can't be set when claim is set")) + } + if seenClaims.Has(rule.Claim) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("claim"), rule.Claim)) + } + seenClaims.Insert(rule.Claim) + case len(rule.Expression) > 0: + if len(rule.RequiredValue) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("requiredValue"), rule.RequiredValue, "requiredValue can't be set when expression is set")) + } + if seenExpressions.Has(rule.Expression) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("expression"), rule.Expression)) + continue + } + seenExpressions.Insert(rule.Expression) + + compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ClaimValidationCondition{ + Expression: rule.Expression, + Message: rule.Message, + }, fldPath.Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + continue + } + if compilationResult != nil { + compilationResults = append(compilationResults, *compilationResult) + } + } + } + + if structuredAuthnFeatureEnabled && len(compilationResults) > 0 { + state.mapper.ClaimValidationRules = authenticationcel.NewClaimsMapper(compilationResults) + state.usesEmailVerifiedClaim = state.usesEmailVerifiedClaim || anyUsesEmailVerifiedClaim(compilationResults) + } + + return allErrs +} + +func validateClaimMappings(compiler authenticationcel.Compiler, state *validationState, m api.ClaimMappings, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { + var allErrs field.ErrorList + + if !structuredAuthnFeatureEnabled { + if len(m.Username.Expression) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("username").Child("expression"), m.Username.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + if len(m.Groups.Expression) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("groups").Child("expression"), m.Groups.Expression, "expression is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + if len(m.UID.Claim) > 0 || len(m.UID.Expression) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), "", "uid claim mapping is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + if len(m.Extra) > 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("extra"), "", "extra claim mapping is not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + } + + compilationResult, err := validatePrefixClaimOrExpression(compiler, m.Username, fldPath.Child("username"), true) + if err != nil { + allErrs = append(allErrs, err...) + } else if compilationResult != nil && structuredAuthnFeatureEnabled { + state.usesEmailClaim = state.usesEmailClaim || usesEmailClaim(compilationResult.AST) + state.usesEmailVerifiedClaim = state.usesEmailVerifiedClaim || usesEmailVerifiedClaim(compilationResult.AST) + state.mapper.Username = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) + } + + compilationResult, err = validatePrefixClaimOrExpression(compiler, m.Groups, fldPath.Child("groups"), false) + if err != nil { + allErrs = append(allErrs, err...) 
+ } else if compilationResult != nil && structuredAuthnFeatureEnabled { + state.mapper.Groups = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) + } + + switch { + case len(m.UID.Claim) > 0 && len(m.UID.Expression) > 0: + allErrs = append(allErrs, field.Invalid(fldPath.Child("uid"), "", "claim and expression can't both be set")) + case len(m.UID.Expression) > 0: + compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ClaimMappingExpression{ + Expression: m.UID.Expression, + }, fldPath.Child("uid").Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + } else if structuredAuthnFeatureEnabled && compilationResult != nil { + state.mapper.UID = authenticationcel.NewClaimsMapper([]authenticationcel.CompilationResult{*compilationResult}) + } + } + + var extraCompilationResults []authenticationcel.CompilationResult + seenExtraKeys := sets.NewString() + + for i, mapping := range m.Extra { + fldPath := fldPath.Child("extra").Index(i) + // Key should be namespaced to the authenticator or authenticator/authorizer pair making use of them. + // For instance: "example.org/foo" instead of "foo". + // xref: https://github.com/kubernetes/kubernetes/blob/3825e206cb162a7ad7431a5bdf6a065ae8422cf7/staging/src/k8s.io/apiserver/pkg/authentication/user/user.go#L31-L41 + // IsDomainPrefixedPath checks for non-empty key and that the key is prefixed with a domain name. + allErrs = append(allErrs, utilvalidation.IsDomainPrefixedPath(fldPath.Child("key"), mapping.Key)...) + if mapping.Key != strings.ToLower(mapping.Key) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("key"), mapping.Key, "key must be lowercase")) + } + if seenExtraKeys.Has(mapping.Key) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("key"), mapping.Key)) + continue + } + seenExtraKeys.Insert(mapping.Key) + + if len(mapping.ValueExpression) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("valueExpression"), "valueExpression is required")) + continue + } + + compilationResult, err := compileClaimsCELExpression(compiler, &authenticationcel.ExtraMappingExpression{ + Key: mapping.Key, + Expression: mapping.ValueExpression, + }, fldPath.Child("valueExpression")) + + if err != nil { + allErrs = append(allErrs, err) + continue + } + + if compilationResult != nil { + extraCompilationResults = append(extraCompilationResults, *compilationResult) + } + } + + if structuredAuthnFeatureEnabled && len(extraCompilationResults) > 0 { + state.mapper.Extra = authenticationcel.NewClaimsMapper(extraCompilationResults) + state.usesEmailVerifiedClaim = state.usesEmailVerifiedClaim || anyUsesEmailVerifiedClaim(extraCompilationResults) + } + + if structuredAuthnFeatureEnabled && state.usesEmailClaim && !state.usesEmailVerifiedClaim { + allErrs = append(allErrs, field.Invalid(fldPath.Child("username", "expression"), m.Username.Expression, + "claims.email_verified must be used in claimMappings.username.expression or claimMappings.extra[*].valueExpression or claimValidationRules[*].expression when claims.email is used in claimMappings.username.expression")) + } + + return allErrs +} + +func usesEmailClaim(ast *celgo.Ast) bool { + return hasSelectExp(ast.Expr(), "claims", "email") +} + +func anyUsesEmailVerifiedClaim(results []authenticationcel.CompilationResult) bool { + for _, result := range results { + if usesEmailVerifiedClaim(result.AST) { + return true + } + } + return false +} + +func usesEmailVerifiedClaim(ast *celgo.Ast) bool { + return 
hasSelectExp(ast.Expr(), "claims", "email_verified") +} + +func hasSelectExp(exp *exprpb.Expr, operand, field string) bool { + if exp == nil { + return false + } + switch e := exp.ExprKind.(type) { + case *exprpb.Expr_ConstExpr, + *exprpb.Expr_IdentExpr: + return false + case *exprpb.Expr_SelectExpr: + s := e.SelectExpr + if s == nil { + return false + } + if isIdentOperand(s.Operand, operand) && s.Field == field { + return true + } + return hasSelectExp(s.Operand, operand, field) + case *exprpb.Expr_CallExpr: + c := e.CallExpr + if c == nil { + return false + } + if c.Target == nil && c.Function == operators.OptSelect && len(c.Args) == 2 && + isIdentOperand(c.Args[0], operand) && isConstField(c.Args[1], field) { + return true + } + for _, arg := range c.Args { + if hasSelectExp(arg, operand, field) { + return true + } + } + return hasSelectExp(c.Target, operand, field) + case *exprpb.Expr_ListExpr: + l := e.ListExpr + if l == nil { + return false + } + for _, element := range l.Elements { + if hasSelectExp(element, operand, field) { + return true + } + } + return false + case *exprpb.Expr_StructExpr: + s := e.StructExpr + if s == nil { + return false + } + for _, entry := range s.Entries { + if hasSelectExp(entry.GetMapKey(), operand, field) { + return true + } + if hasSelectExp(entry.Value, operand, field) { + return true + } + } + return false + case *exprpb.Expr_ComprehensionExpr: + c := e.ComprehensionExpr + if c == nil { + return false + } + return hasSelectExp(c.IterRange, operand, field) || + hasSelectExp(c.AccuInit, operand, field) || + hasSelectExp(c.LoopCondition, operand, field) || + hasSelectExp(c.LoopStep, operand, field) || + hasSelectExp(c.Result, operand, field) + default: + return false + } +} + +func isIdentOperand(exp *exprpb.Expr, operand string) bool { + if len(operand) == 0 { + return false // sanity check against default values + } + id := exp.GetIdentExpr() // does not panic even if exp is nil + return id != nil && id.Name == operand +} + +func isConstField(exp *exprpb.Expr, field string) bool { + if len(field) == 0 { + return false // sanity check against default values + } + c := exp.GetConstExpr() // does not panic even if exp is nil + return c != nil && c.GetStringValue() == field // does not panic even if c is not a string +} + +func validatePrefixClaimOrExpression(compiler authenticationcel.Compiler, mapping api.PrefixedClaimOrExpression, fldPath *field.Path, claimOrExpressionRequired bool) (*authenticationcel.CompilationResult, field.ErrorList) { + var allErrs field.ErrorList + + var compilationResult *authenticationcel.CompilationResult + switch { + case len(mapping.Expression) > 0 && len(mapping.Claim) > 0: + allErrs = append(allErrs, field.Invalid(fldPath, "", "claim and expression can't both be set")) + case len(mapping.Expression) == 0 && len(mapping.Claim) == 0 && claimOrExpressionRequired: + allErrs = append(allErrs, field.Required(fldPath, "claim or expression is required")) + case len(mapping.Expression) > 0: + var err *field.Error + + if mapping.Prefix != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("prefix"), *mapping.Prefix, "prefix can't be set when expression is set")) + } + compilationResult, err = compileClaimsCELExpression(compiler, &authenticationcel.ClaimMappingExpression{ + Expression: mapping.Expression, + }, fldPath.Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + } + + case len(mapping.Claim) > 0: + if mapping.Prefix == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("prefix"), 
"prefix is required when claim is set. It can be set to an empty string to disable prefixing")) + } + } + + return compilationResult, allErrs +} + +func validateUserValidationRules(compiler authenticationcel.Compiler, state *validationState, rules []api.UserValidationRule, fldPath *field.Path, structuredAuthnFeatureEnabled bool) field.ErrorList { + var allErrs field.ErrorList + var compilationResults []authenticationcel.CompilationResult + + if len(rules) > 0 && !structuredAuthnFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath, "", "user validation rules are not supported when StructuredAuthenticationConfiguration feature gate is disabled")) + } + + seenExpressions := sets.NewString() + for i, rule := range rules { + fldPath := fldPath.Index(i) + + if len(rule.Expression) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("expression"), "expression is required")) + continue + } + + if seenExpressions.Has(rule.Expression) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("expression"), rule.Expression)) + continue + } + seenExpressions.Insert(rule.Expression) + + compilationResult, err := compileUserCELExpression(compiler, &authenticationcel.UserValidationCondition{ + Expression: rule.Expression, + Message: rule.Message, + }, fldPath.Child("expression")) + + if err != nil { + allErrs = append(allErrs, err) + continue + } + + if compilationResult != nil { + compilationResults = append(compilationResults, *compilationResult) + } + } + + if structuredAuthnFeatureEnabled && len(compilationResults) > 0 { + state.mapper.UserValidationRules = authenticationcel.NewUserMapper(compilationResults) + } + + return allErrs +} + +func compileClaimsCELExpression(compiler authenticationcel.Compiler, expression authenticationcel.ExpressionAccessor, fldPath *field.Path) (*authenticationcel.CompilationResult, *field.Error) { + compilationResult, err := compiler.CompileClaimsExpression(expression) + if err != nil { + return nil, convertCELErrorToValidationError(fldPath, expression.GetExpression(), err) + } + return &compilationResult, nil +} + +func compileUserCELExpression(compiler authenticationcel.Compiler, expression authenticationcel.ExpressionAccessor, fldPath *field.Path) (*authenticationcel.CompilationResult, *field.Error) { + compilationResult, err := compiler.CompileUserExpression(expression) + if err != nil { + return nil, convertCELErrorToValidationError(fldPath, expression.GetExpression(), err) + } + return &compilationResult, nil +} + +// ValidateAuthorizationConfiguration validates a given AuthorizationConfiguration. 
+func ValidateAuthorizationConfiguration(fldPath *field.Path, c *api.AuthorizationConfiguration, knownTypes sets.String, repeatableTypes sets.String) field.ErrorList { + allErrs := field.ErrorList{} + + if len(c.Authorizers) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("authorizers"), "at least one authorization mode must be defined")) + } + + seenAuthorizerTypes := sets.NewString() + seenAuthorizerNames := sets.NewString() + for i, a := range c.Authorizers { + fldPath := fldPath.Child("authorizers").Index(i) + aType := string(a.Type) + if aType == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("type"), "")) + continue + } + if !knownTypes.Has(aType) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("type"), aType, knownTypes.List())) + continue + } + if seenAuthorizerTypes.Has(aType) && !repeatableTypes.Has(aType) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("type"), aType)) + continue + } + seenAuthorizerTypes.Insert(aType) + + if len(a.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) + } else if seenAuthorizerNames.Has(a.Name) { + allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), a.Name)) + } else if errs := utilvalidation.IsDNS1123Subdomain(a.Name); len(errs) != 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), a.Name, fmt.Sprintf("authorizer name is invalid: %s", strings.Join(errs, ", ")))) + } + seenAuthorizerNames.Insert(a.Name) + + switch a.Type { + case api.TypeWebhook: + if a.Webhook == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("webhook"), "required when type=Webhook")) + continue + } + allErrs = append(allErrs, ValidateWebhookConfiguration(fldPath, a.Webhook)...) + default: + if a.Webhook != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("webhook"), "non-null", "may only be specified when type=Webhook")) + } + } + } + + return allErrs +} + +func ValidateWebhookConfiguration(fldPath *field.Path, c *api.WebhookConfiguration) field.ErrorList { + allErrs := field.ErrorList{} + + if c.Timeout.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("timeout"), "")) + } else if c.Timeout.Duration > 30*time.Second || c.Timeout.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("timeout"), c.Timeout.Duration.String(), "must be > 0s and <= 30s")) + } + + if c.AuthorizedTTL.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("authorizedTTL"), "")) + } else if c.AuthorizedTTL.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("authorizedTTL"), c.AuthorizedTTL.Duration.String(), "must be > 0s")) + } + + if c.UnauthorizedTTL.Duration == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("unauthorizedTTL"), "")) + } else if c.UnauthorizedTTL.Duration < 0 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("unauthorizedTTL"), c.UnauthorizedTTL.Duration.String(), "must be > 0s")) + } + + switch c.SubjectAccessReviewVersion { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("subjectAccessReviewVersion"), "")) + case "v1": + _ = &v1.SubjectAccessReview{} + case "v1beta1": + _ = &v1beta1.SubjectAccessReview{} + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("subjectAccessReviewVersion"), c.SubjectAccessReviewVersion, []string{"v1", "v1beta1"})) + } + + switch c.MatchConditionSubjectAccessReviewVersion { + case "": + if len(c.MatchConditions) > 0 { + allErrs = append(allErrs, 
field.Required(fldPath.Child("matchConditionSubjectAccessReviewVersion"), "required if match conditions are specified")) + } + case "v1": + _ = &v1.SubjectAccessReview{} + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("matchConditionSubjectAccessReviewVersion"), c.MatchConditionSubjectAccessReviewVersion, []string{"v1"})) + } + + switch c.FailurePolicy { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("failurePolicy"), "")) + case api.FailurePolicyNoOpinion, api.FailurePolicyDeny: + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("failurePolicy"), c.FailurePolicy, []string{"NoOpinion", "Deny"})) + } + + switch c.ConnectionInfo.Type { + case "": + allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "type"), "")) + case api.AuthorizationWebhookConnectionInfoTypeInCluster: + if c.ConnectionInfo.KubeConfigFile != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "can only be set when type=KubeConfigFile")) + } + case api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile: + if c.ConnectionInfo.KubeConfigFile == nil || *c.ConnectionInfo.KubeConfigFile == "" { + allErrs = append(allErrs, field.Required(fldPath.Child("connectionInfo", "kubeConfigFile"), "")) + } else if !filepath.IsAbs(*c.ConnectionInfo.KubeConfigFile) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be an absolute path")) + } else if info, err := os.Stat(*c.ConnectionInfo.KubeConfigFile); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, fmt.Sprintf("error loading file: %v", err))) + } else if !info.Mode().IsRegular() { + allErrs = append(allErrs, field.Invalid(fldPath.Child("connectionInfo", "kubeConfigFile"), *c.ConnectionInfo.KubeConfigFile, "must be a regular file")) + } + default: + allErrs = append(allErrs, field.NotSupported(fldPath.Child("connectionInfo", "type"), c.ConnectionInfo, []string{api.AuthorizationWebhookConnectionInfoTypeInCluster, api.AuthorizationWebhookConnectionInfoTypeKubeConfigFile})) + } + + _, errs := compileMatchConditions(c.MatchConditions, fldPath, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) + allErrs = append(allErrs, errs...) + + return allErrs +} + +// ValidateAndCompileMatchConditions validates a given webhook's matchConditions. +// This is exported for use in authz package. 
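+//
+// For example (the expression below is an assumed illustration, not taken from
+// this file):
+//
+//	conditions := []api.WebhookMatchCondition{
+//		{Expression: "request.resourceAttributes.namespace == 'kube-system'"},
+//	}
+//	celMatcher, errs := ValidateAndCompileMatchConditions(conditions)
+//	if len(errs) == 0 && celMatcher != nil {
+//		// celMatcher can now pre-filter SubjectAccessReview requests before
+//		// they are sent to the webhook.
+//	}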
+func ValidateAndCompileMatchConditions(matchConditions []api.WebhookMatchCondition) (*authorizationcel.CELMatcher, field.ErrorList) { + return compileMatchConditions(matchConditions, nil, utilfeature.DefaultFeatureGate.Enabled(features.StructuredAuthorizationConfiguration)) +} + +func compileMatchConditions(matchConditions []api.WebhookMatchCondition, fldPath *field.Path, structuredAuthzFeatureEnabled bool) (*authorizationcel.CELMatcher, field.ErrorList) { + var allErrs field.ErrorList + // should fail when match conditions are used without feature enabled + if len(matchConditions) > 0 && !structuredAuthzFeatureEnabled { + allErrs = append(allErrs, field.Invalid(fldPath.Child("matchConditions"), "", "matchConditions are not supported when StructuredAuthorizationConfiguration feature gate is disabled")) + } + if len(matchConditions) > 64 { + allErrs = append(allErrs, field.TooMany(fldPath.Child("matchConditions"), len(matchConditions), 64)) + return nil, allErrs + } + + // strictCost is set to true which enables the strict cost for CEL validation. + compiler := authorizationcel.NewCompiler(environment.MustBaseEnvSet(environment.DefaultCompatibilityVersion(), true)) + seenExpressions := sets.NewString() + var compilationResults []authorizationcel.CompilationResult + var usesFieldSelector, usesLabelSelector bool + + for i, condition := range matchConditions { + fldPath := fldPath.Child("matchConditions").Index(i).Child("expression") + if len(strings.TrimSpace(condition.Expression)) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + continue + } + if seenExpressions.Has(condition.Expression) { + allErrs = append(allErrs, field.Duplicate(fldPath, condition.Expression)) + continue + } + seenExpressions.Insert(condition.Expression) + compilationResult, err := compileMatchConditionsExpression(fldPath, compiler, condition.Expression) + if err != nil { + allErrs = append(allErrs, err) + continue + } + compilationResults = append(compilationResults, compilationResult) + usesFieldSelector = usesFieldSelector || compilationResult.UsesFieldSelector + usesLabelSelector = usesLabelSelector || compilationResult.UsesLabelSelector + } + if len(compilationResults) == 0 { + return nil, allErrs + } + return &authorizationcel.CELMatcher{ + CompilationResults: compilationResults, + UsesFieldSelector: usesFieldSelector, + UsesLabelSelector: usesLabelSelector, + }, allErrs +} + +func compileMatchConditionsExpression(fldPath *field.Path, compiler authorizationcel.Compiler, expression string) (authorizationcel.CompilationResult, *field.Error) { + authzExpression := &authorizationcel.SubjectAccessReviewMatchCondition{ + Expression: expression, + } + compilationResult, err := compiler.CompileCELExpression(authzExpression) + if err != nil { + return compilationResult, convertCELErrorToValidationError(fldPath, authzExpression.GetExpression(), err) + } + return compilationResult, nil +} + +func convertCELErrorToValidationError(fldPath *field.Path, expression string, err error) *field.Error { + var celErr *cel.Error + if errors.As(err, &celErr) { + switch celErr.Type { + case cel.ErrorTypeRequired: + return field.Required(fldPath, celErr.Detail) + case cel.ErrorTypeInvalid: + return field.Invalid(fldPath, expression, celErr.Detail) + default: + return field.InternalError(fldPath, celErr) + } + } + return field.InternalError(fldPath, fmt.Errorf("error is not cel error: %w", err)) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_encryption.go 
b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_encryption.go new file mode 100644 index 00000000..b8dd5fe0 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/validation/validation_encryption.go @@ -0,0 +1,451 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package validation validates EncryptionConfiguration. +package validation + +import ( + "encoding/base64" + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/apiserver" +) + +const ( + moreThanOneElementErr = "more than one provider specified in a single element, should split into different list elements" + keyLenErrFmt = "secret is not of the expected length, got %d, expected one of %v" + unsupportedSchemeErrFmt = "unsupported scheme %q for KMS provider, only unix is supported" + unsupportedKMSAPIVersionErrFmt = "unsupported apiVersion %s for KMS provider, only v1 and v2 are supported" + atLeastOneRequiredErrFmt = "at least one %s is required" + invalidURLErrFmt = "invalid endpoint for kms provider, error: %v" + mandatoryFieldErrFmt = "%s is a mandatory field for a %s" + base64EncodingErr = "secrets must be base64 encoded" + zeroOrNegativeErrFmt = "%s should be a positive value" + nonZeroErrFmt = "%s should be a positive value, or negative to disable" + encryptionConfigNilErr = "EncryptionConfiguration can't be nil" + invalidKMSConfigNameErrFmt = "invalid KMS provider name %s, must not contain ':'" + duplicateKMSConfigNameErrFmt = "duplicate KMS provider name %s, names must be unique" + eventsGroupErr = "'*.events.k8s.io' objects are stored using the 'events' API group in etcd. Use 'events' instead in the config file" + extensionsGroupErr = "'extensions' group has been removed and cannot be used for encryption" + starResourceErr = "use '*.' to encrypt all the resources from core API group or *.* to encrypt all resources" + overlapErr = "using overlapping resources such as 'secrets' and '*.' in the same resource list is not allowed as they will be masked" + nonRESTAPIResourceErr = "resources which do not have REST API/s cannot be encrypted" + resourceNameErr = "resource name should not contain capital letters" + resourceAcrossGroupErr = "encrypting the same resource across groups is not supported" + duplicateResourceErr = "the same resource cannot be specified multiple times" +) + +var ( + // See https://golang.org/pkg/crypto/aes/#NewCipher for details on supported key sizes for AES. + aesKeySizes = []int{16, 24, 32} + + // See https://godoc.org/golang.org/x/crypto/nacl/secretbox#Open for details on the supported key sizes for Secretbox. + secretBoxKeySizes = []int{32} +) + +// ValidateEncryptionConfiguration validates a v1.EncryptionConfiguration. 
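+//
+// A minimal sketch of use (the configuration literal is assumed for
+// illustration):
+//
+//	cfg := &apiserver.EncryptionConfiguration{
+//		Resources: []apiserver.ResourceConfiguration{{
+//			Resources: []string{"secrets"},
+//			Providers: []apiserver.ProviderConfiguration{{
+//				Identity: &apiserver.IdentityConfiguration{},
+//			}},
+//		}},
+//	}
+//	errs := ValidateEncryptionConfiguration(cfg, false /* reload */)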
+func ValidateEncryptionConfiguration(c *apiserver.EncryptionConfiguration, reload bool) field.ErrorList { + root := field.NewPath("resources") + allErrs := field.ErrorList{} + + if c == nil { + allErrs = append(allErrs, field.Required(root, encryptionConfigNilErr)) + return allErrs + } + + if len(c.Resources) == 0 { + allErrs = append(allErrs, field.Required(root, fmt.Sprintf(atLeastOneRequiredErrFmt, root))) + return allErrs + } + + // kmsProviderNames is used to track config names to ensure they are unique. + kmsProviderNames := sets.New[string]() + for i, conf := range c.Resources { + r := root.Index(i).Child("resources") + p := root.Index(i).Child("providers") + + if len(conf.Resources) == 0 { + allErrs = append(allErrs, field.Required(r, fmt.Sprintf(atLeastOneRequiredErrFmt, r))) + } + + allErrs = append(allErrs, validateResourceOverlap(conf.Resources, r)...) + allErrs = append(allErrs, validateResourceNames(conf.Resources, r)...) + + if len(conf.Providers) == 0 { + allErrs = append(allErrs, field.Required(p, fmt.Sprintf(atLeastOneRequiredErrFmt, p))) + } + + for j, provider := range conf.Providers { + path := p.Index(j) + allErrs = append(allErrs, validateSingleProvider(provider, path)...) + + switch { + case provider.KMS != nil: + allErrs = append(allErrs, validateKMSConfiguration(provider.KMS, path.Child("kms"), kmsProviderNames, reload)...) + kmsProviderNames.Insert(provider.KMS.Name) + case provider.AESGCM != nil: + allErrs = append(allErrs, validateKeys(provider.AESGCM.Keys, path.Child("aesgcm").Child("keys"), aesKeySizes)...) + case provider.AESCBC != nil: + allErrs = append(allErrs, validateKeys(provider.AESCBC.Keys, path.Child("aescbc").Child("keys"), aesKeySizes)...) + case provider.Secretbox != nil: + allErrs = append(allErrs, validateKeys(provider.Secretbox.Keys, path.Child("secretbox").Child("keys"), secretBoxKeySizes)...) 
+ } + } + } + + return allErrs +} + +var anyGroupAnyResource = schema.GroupResource{ + Group: "*", + Resource: "*", +} + +func validateResourceOverlap(resources []string, fieldPath *field.Path) field.ErrorList { + if len(resources) < 2 { // cannot have overlap with a single resource + return nil + } + + var allErrs field.ErrorList + + r := make([]schema.GroupResource, 0, len(resources)) + for _, resource := range resources { + r = append(r, schema.ParseGroupResource(resource)) + } + + var hasOverlap, hasDuplicate bool + + for i, r1 := range r { + for j, r2 := range r { + if i == j { + continue + } + + if r1 == r2 && !hasDuplicate { + hasDuplicate = true + continue + } + + if hasOverlap { + continue + } + + if r1 == anyGroupAnyResource { + hasOverlap = true + continue + } + + if r1.Group != r2.Group { + continue + } + + if r1.Resource == "*" || r2.Resource == "*" { + hasOverlap = true + continue + } + } + } + + if hasDuplicate { + allErrs = append( + allErrs, + field.Invalid( + fieldPath, + resources, + duplicateResourceErr, + ), + ) + } + + if hasOverlap { + allErrs = append( + allErrs, + field.Invalid( + fieldPath, + resources, + overlapErr, + ), + ) + } + + return allErrs +} + +func validateResourceNames(resources []string, fieldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + for j, res := range resources { + jj := fieldPath.Index(j) + + // check if resource name has capital letters + if hasCapital(res) { + allErrs = append( + allErrs, + field.Invalid( + jj, + resources[j], + resourceNameErr, + ), + ) + continue + } + + // check if resource is '*' + if res == "*" { + allErrs = append( + allErrs, + field.Invalid( + jj, + resources[j], + starResourceErr, + ), + ) + continue + } + + // check if resource is: + // 'apiserveripinfo' OR + // 'serviceipallocations' OR + // 'servicenodeportallocations' OR + if res == "apiserveripinfo" || + res == "serviceipallocations" || + res == "servicenodeportallocations" { + allErrs = append( + allErrs, + field.Invalid( + jj, + resources[j], + nonRESTAPIResourceErr, + ), + ) + continue + } + + // check if group is 'events.k8s.io' + gr := schema.ParseGroupResource(res) + if gr.Group == "events.k8s.io" { + allErrs = append( + allErrs, + field.Invalid( + jj, + resources[j], + eventsGroupErr, + ), + ) + continue + } + + // check if group is 'extensions' + if gr.Group == "extensions" { + allErrs = append( + allErrs, + field.Invalid( + jj, + resources[j], + extensionsGroupErr, + ), + ) + continue + } + + // disallow resource.* as encrypting the same resource across groups does not make sense + if gr.Group == "*" && gr.Resource != "*" { + allErrs = append( + allErrs, + field.Invalid( + jj, + resources[j], + resourceAcrossGroupErr, + ), + ) + continue + } + } + + return allErrs +} + +func validateSingleProvider(provider apiserver.ProviderConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + found := 0 + + if provider.KMS != nil { + found++ + } + if provider.AESGCM != nil { + found++ + } + if provider.AESCBC != nil { + found++ + } + if provider.Secretbox != nil { + found++ + } + if provider.Identity != nil { + found++ + } + + if found == 0 { + return append(allErrs, field.Invalid(fieldPath, provider, "provider does not contain any of the expected providers: KMS, AESGCM, AESCBC, Secretbox, Identity")) + } + + if found > 1 { + return append(allErrs, field.Invalid(fieldPath, provider, moreThanOneElementErr)) + } + + return allErrs +} + +func validateKeys(keys []apiserver.Key, fieldPath *field.Path, expectedLen 
[]int) field.ErrorList { + allErrs := field.ErrorList{} + + if len(keys) == 0 { + allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf(atLeastOneRequiredErrFmt, "keys"))) + return allErrs + } + + for i, key := range keys { + allErrs = append(allErrs, validateKey(key, fieldPath.Index(i), expectedLen)...) + } + + return allErrs +} + +func validateKey(key apiserver.Key, fieldPath *field.Path, expectedLen []int) field.ErrorList { + allErrs := field.ErrorList{} + + if key.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("name"), fmt.Sprintf(mandatoryFieldErrFmt, "name", "key"))) + } + + if key.Secret == "" { + allErrs = append(allErrs, field.Required(fieldPath.Child("secret"), fmt.Sprintf(mandatoryFieldErrFmt, "secret", "key"))) + return allErrs + } + + secret, err := base64.StdEncoding.DecodeString(key.Secret) + if err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", base64EncodingErr)) + return allErrs + } + + lenMatched := false + for _, l := range expectedLen { + if len(secret) == l { + lenMatched = true + break + } + } + + if !lenMatched { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("secret"), "REDACTED", fmt.Sprintf(keyLenErrFmt, len(secret), expectedLen))) + } + + return allErrs +} + +func validateKMSConfiguration(c *apiserver.KMSConfiguration, fieldPath *field.Path, kmsProviderNames sets.Set[string], reload bool) field.ErrorList { + allErrs := field.ErrorList{} + + allErrs = append(allErrs, validateKMSConfigName(c, fieldPath.Child("name"), kmsProviderNames, reload)...) + allErrs = append(allErrs, validateKMSTimeout(c, fieldPath.Child("timeout"))...) + allErrs = append(allErrs, validateKMSEndpoint(c, fieldPath.Child("endpoint"))...) + allErrs = append(allErrs, validateKMSCacheSize(c, fieldPath.Child("cachesize"))...) + allErrs = append(allErrs, validateKMSAPIVersion(c, fieldPath.Child("apiVersion"))...) + return allErrs +} + +func validateKMSCacheSize(c *apiserver.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // In defaulting, we set the cache size to the default value only when API version is v1. + // So, for v2 API version, we expect the cache size field to be nil. 
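+	// Illustration (values assumed): {APIVersion: "v2", CacheSize: &size} is
+	// rejected below, while for v1 a *CacheSize of 0 is rejected because 0 is
+	// not a valid size (negative values disable the cache, per nonZeroErrFmt).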
+ if c.APIVersion != "v1" && c.CacheSize != nil { + allErrs = append(allErrs, field.Invalid(fieldPath, *c.CacheSize, "cachesize is not supported in v2")) + } + if c.APIVersion == "v1" && *c.CacheSize == 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, *c.CacheSize, fmt.Sprintf(nonZeroErrFmt, "cachesize"))) + } + + return allErrs +} + +func validateKMSTimeout(c *apiserver.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.Timeout.Duration <= 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, c.Timeout, fmt.Sprintf(zeroOrNegativeErrFmt, "timeout"))) + } + + return allErrs +} + +func validateKMSEndpoint(c *apiserver.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(c.Endpoint) == 0 { + return append(allErrs, field.Invalid(fieldPath, "", fmt.Sprintf(mandatoryFieldErrFmt, "endpoint", "kms"))) + } + + u, err := url.Parse(c.Endpoint) + if err != nil { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf(invalidURLErrFmt, err))) + } + + if u.Scheme != "unix" { + return append(allErrs, field.Invalid(fieldPath, c.Endpoint, fmt.Sprintf(unsupportedSchemeErrFmt, u.Scheme))) + } + + return allErrs +} + +func validateKMSAPIVersion(c *apiserver.KMSConfiguration, fieldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if c.APIVersion != "v1" && c.APIVersion != "v2" { + allErrs = append(allErrs, field.Invalid(fieldPath, c.APIVersion, fmt.Sprintf(unsupportedKMSAPIVersionErrFmt, "apiVersion"))) + } + + return allErrs +} + +func validateKMSConfigName(c *apiserver.KMSConfiguration, fieldPath *field.Path, kmsProviderNames sets.Set[string], reload bool) field.ErrorList { + allErrs := field.ErrorList{} + if c.Name == "" { + allErrs = append(allErrs, field.Required(fieldPath, fmt.Sprintf(mandatoryFieldErrFmt, "name", "provider"))) + } + + // kms v2 providers are not allowed to have a ":" in their name + if c.APIVersion != "v1" && strings.Contains(c.Name, ":") { + allErrs = append(allErrs, field.Invalid(fieldPath, c.Name, fmt.Sprintf(invalidKMSConfigNameErrFmt, c.Name))) + } + + // kms v2 providers name must always be unique across all kms providers (v1 and v2) + // kms v1 provider names must be unique across all kms providers (v1 and v2) when hot reloading of encryption configuration is enabled (reload=true) + if reload || c.APIVersion != "v1" { + if kmsProviderNames.Has(c.Name) { + allErrs = append(allErrs, field.Invalid(fieldPath, c.Name, fmt.Sprintf(duplicateKMSConfigNameErrFmt, c.Name))) + } + } + + return allErrs +} + +func hasCapital(input string) bool { + return strings.ToLower(input) != input +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go new file mode 100644 index 00000000..6439e822 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/apiserver/zz_generated.deepcopy.go @@ -0,0 +1,803 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package apiserver + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AESConfiguration) DeepCopyInto(out *AESConfiguration) { + *out = *in + if in.Keys != nil { + in, out := &in.Keys, &out.Keys + *out = make([]Key, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AESConfiguration. +func (in *AESConfiguration) DeepCopy() *AESConfiguration { + if in == nil { + return nil + } + out := new(AESConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfiguration) DeepCopyInto(out *AdmissionConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make([]AdmissionPluginConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfiguration. +func (in *AdmissionConfiguration) DeepCopy() *AdmissionConfiguration { + if in == nil { + return nil + } + out := new(AdmissionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AdmissionConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfiguration) DeepCopyInto(out *AdmissionPluginConfiguration) { + *out = *in + if in.Configuration != nil { + in, out := &in.Configuration, &out.Configuration + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfiguration. +func (in *AdmissionPluginConfiguration) DeepCopy() *AdmissionPluginConfiguration { + if in == nil { + return nil + } + out := new(AdmissionPluginConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthCondition) DeepCopyInto(out *AnonymousAuthCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthCondition. +func (in *AnonymousAuthCondition) DeepCopy() *AnonymousAuthCondition { + if in == nil { + return nil + } + out := new(AnonymousAuthCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnonymousAuthConfig) DeepCopyInto(out *AnonymousAuthConfig) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AnonymousAuthCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnonymousAuthConfig. 
+func (in *AnonymousAuthConfig) DeepCopy() *AnonymousAuthConfig { + if in == nil { + return nil + } + out := new(AnonymousAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationConfiguration) DeepCopyInto(out *AuthenticationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.JWT != nil { + in, out := &in.JWT, &out.JWT + *out = make([]JWTAuthenticator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Anonymous != nil { + in, out := &in.Anonymous, &out.Anonymous + *out = new(AnonymousAuthConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationConfiguration. +func (in *AuthenticationConfiguration) DeepCopy() *AuthenticationConfiguration { + if in == nil { + return nil + } + out := new(AuthenticationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizationConfiguration) DeepCopyInto(out *AuthorizationConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Authorizers != nil { + in, out := &in.Authorizers, &out.Authorizers + *out = make([]AuthorizerConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizationConfiguration. +func (in *AuthorizationConfiguration) DeepCopy() *AuthorizationConfiguration { + if in == nil { + return nil + } + out := new(AuthorizationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthorizationConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizerConfiguration) DeepCopyInto(out *AuthorizerConfiguration) { + *out = *in + if in.Webhook != nil { + in, out := &in.Webhook, &out.Webhook + *out = new(WebhookConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizerConfiguration. +func (in *AuthorizerConfiguration) DeepCopy() *AuthorizerConfiguration { + if in == nil { + return nil + } + out := new(AuthorizerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimMappings) DeepCopyInto(out *ClaimMappings) { + *out = *in + in.Username.DeepCopyInto(&out.Username) + in.Groups.DeepCopyInto(&out.Groups) + out.UID = in.UID + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make([]ExtraMapping, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimMappings. 
+func (in *ClaimMappings) DeepCopy() *ClaimMappings { + if in == nil { + return nil + } + out := new(ClaimMappings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimOrExpression) DeepCopyInto(out *ClaimOrExpression) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimOrExpression. +func (in *ClaimOrExpression) DeepCopy() *ClaimOrExpression { + if in == nil { + return nil + } + out := new(ClaimOrExpression) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimValidationRule) DeepCopyInto(out *ClaimValidationRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimValidationRule. +func (in *ClaimValidationRule) DeepCopy() *ClaimValidationRule { + if in == nil { + return nil + } + out := new(ClaimValidationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Connection) DeepCopyInto(out *Connection) { + *out = *in + if in.Transport != nil { + in, out := &in.Transport, &out.Transport + *out = new(Transport) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Connection. +func (in *Connection) DeepCopy() *Connection { + if in == nil { + return nil + } + out := new(Connection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelection) DeepCopyInto(out *EgressSelection) { + *out = *in + in.Connection.DeepCopyInto(&out.Connection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelection. +func (in *EgressSelection) DeepCopy() *EgressSelection { + if in == nil { + return nil + } + out := new(EgressSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressSelectorConfiguration) DeepCopyInto(out *EgressSelectorConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.EgressSelections != nil { + in, out := &in.EgressSelections, &out.EgressSelections + *out = make([]EgressSelection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressSelectorConfiguration. +func (in *EgressSelectorConfiguration) DeepCopy() *EgressSelectorConfiguration { + if in == nil { + return nil + } + out := new(EgressSelectorConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressSelectorConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EncryptionConfiguration) DeepCopyInto(out *EncryptionConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ResourceConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionConfiguration.
+func (in *EncryptionConfiguration) DeepCopy() *EncryptionConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(EncryptionConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EncryptionConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping.
+func (in *ExtraMapping) DeepCopy() *ExtraMapping {
+	if in == nil {
+		return nil
+	}
+	out := new(ExtraMapping)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *IdentityConfiguration) DeepCopyInto(out *IdentityConfiguration) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityConfiguration.
+func (in *IdentityConfiguration) DeepCopy() *IdentityConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(IdentityConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Issuer) DeepCopyInto(out *Issuer) {
+	*out = *in
+	if in.Audiences != nil {
+		in, out := &in.Audiences, &out.Audiences
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Issuer.
+func (in *Issuer) DeepCopy() *Issuer {
+	if in == nil {
+		return nil
+	}
+	out := new(Issuer)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JWTAuthenticator) DeepCopyInto(out *JWTAuthenticator) {
+	*out = *in
+	in.Issuer.DeepCopyInto(&out.Issuer)
+	if in.ClaimValidationRules != nil {
+		in, out := &in.ClaimValidationRules, &out.ClaimValidationRules
+		*out = make([]ClaimValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	in.ClaimMappings.DeepCopyInto(&out.ClaimMappings)
+	if in.UserValidationRules != nil {
+		in, out := &in.UserValidationRules, &out.UserValidationRules
+		*out = make([]UserValidationRule, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JWTAuthenticator.
+func (in *JWTAuthenticator) DeepCopy() *JWTAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(JWTAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *KMSConfiguration) DeepCopyInto(out *KMSConfiguration) {
+	*out = *in
+	if in.CacheSize != nil {
+		in, out := &in.CacheSize, &out.CacheSize
+		*out = new(int32)
+		**out = **in
+	}
+	if in.Timeout != nil {
+		in, out := &in.Timeout, &out.Timeout
+		*out = new(v1.Duration)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfiguration.
+func (in *KMSConfiguration) DeepCopy() *KMSConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(KMSConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Key) DeepCopyInto(out *Key) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Key.
+func (in *Key) DeepCopy() *Key {
+	if in == nil {
+		return nil
+	}
+	out := new(Key)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PrefixedClaimOrExpression) DeepCopyInto(out *PrefixedClaimOrExpression) {
+	*out = *in
+	if in.Prefix != nil {
+		in, out := &in.Prefix, &out.Prefix
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixedClaimOrExpression.
+func (in *PrefixedClaimOrExpression) DeepCopy() *PrefixedClaimOrExpression {
+	if in == nil {
+		return nil
+	}
+	out := new(PrefixedClaimOrExpression)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProviderConfiguration) DeepCopyInto(out *ProviderConfiguration) {
+	*out = *in
+	if in.AESGCM != nil {
+		in, out := &in.AESGCM, &out.AESGCM
+		*out = new(AESConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.AESCBC != nil {
+		in, out := &in.AESCBC, &out.AESCBC
+		*out = new(AESConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Secretbox != nil {
+		in, out := &in.Secretbox, &out.Secretbox
+		*out = new(SecretboxConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Identity != nil {
+		in, out := &in.Identity, &out.Identity
+		*out = new(IdentityConfiguration)
+		**out = **in
+	}
+	if in.KMS != nil {
+		in, out := &in.KMS, &out.KMS
+		*out = new(KMSConfiguration)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfiguration.
+func (in *ProviderConfiguration) DeepCopy() *ProviderConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ProviderConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ResourceConfiguration) DeepCopyInto(out *ResourceConfiguration) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Providers != nil {
+		in, out := &in.Providers, &out.Providers
+		*out = make([]ProviderConfiguration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceConfiguration.
+func (in *ResourceConfiguration) DeepCopy() *ResourceConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(ResourceConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecretboxConfiguration) DeepCopyInto(out *SecretboxConfiguration) {
+	*out = *in
+	if in.Keys != nil {
+		in, out := &in.Keys, &out.Keys
+		*out = make([]Key, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretboxConfiguration.
+func (in *SecretboxConfiguration) DeepCopy() *SecretboxConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(SecretboxConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TCPTransport) DeepCopyInto(out *TCPTransport) {
+	*out = *in
+	if in.TLSConfig != nil {
+		in, out := &in.TLSConfig, &out.TLSConfig
+		*out = new(TLSConfig)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPTransport.
+func (in *TCPTransport) DeepCopy() *TCPTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(TCPTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
+func (in *TLSConfig) DeepCopy() *TLSConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(TLSConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.TracingConfiguration.DeepCopyInto(&out.TracingConfiguration)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration.
+func (in *TracingConfiguration) DeepCopy() *TracingConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(TracingConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TracingConfiguration) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Transport) DeepCopyInto(out *Transport) {
+	*out = *in
+	if in.TCP != nil {
+		in, out := &in.TCP, &out.TCP
+		*out = new(TCPTransport)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.UDS != nil {
+		in, out := &in.UDS, &out.UDS
+		*out = new(UDSTransport)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Transport.
+func (in *Transport) DeepCopy() *Transport {
+	if in == nil {
+		return nil
+	}
+	out := new(Transport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UDSTransport) DeepCopyInto(out *UDSTransport) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UDSTransport.
+func (in *UDSTransport) DeepCopy() *UDSTransport {
+	if in == nil {
+		return nil
+	}
+	out := new(UDSTransport)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserValidationRule) DeepCopyInto(out *UserValidationRule) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserValidationRule.
+func (in *UserValidationRule) DeepCopy() *UserValidationRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserValidationRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConfiguration) DeepCopyInto(out *WebhookConfiguration) {
+	*out = *in
+	out.AuthorizedTTL = in.AuthorizedTTL
+	out.UnauthorizedTTL = in.UnauthorizedTTL
+	out.Timeout = in.Timeout
+	in.ConnectionInfo.DeepCopyInto(&out.ConnectionInfo)
+	if in.MatchConditions != nil {
+		in, out := &in.MatchConditions, &out.MatchConditions
+		*out = make([]WebhookMatchCondition, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfiguration.
+func (in *WebhookConfiguration) DeepCopy() *WebhookConfiguration {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConfiguration)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookConnectionInfo) DeepCopyInto(out *WebhookConnectionInfo) {
+	*out = *in
+	if in.KubeConfigFile != nil {
+		in, out := &in.KubeConfigFile, &out.KubeConfigFile
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConnectionInfo.
+func (in *WebhookConnectionInfo) DeepCopy() *WebhookConnectionInfo {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookConnectionInfo)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookMatchCondition) DeepCopyInto(out *WebhookMatchCondition) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookMatchCondition.
+func (in *WebhookMatchCondition) DeepCopy() *WebhookMatchCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookMatchCondition)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS b/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS
new file mode 100644
index 00000000..72f9f9c6
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+# approval on api packages bubbles to api-approvers
+reviewers:
+  - sig-auth-audit-approvers
+  - sig-auth-audit-reviewers
+labels:
+  - sig/auth
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go
new file mode 100644
index 00000000..deda9cbd
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +groupName=audit.k8s.io
+
+package audit // import "k8s.io/apiserver/pkg/apis/audit"
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go b/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go
new file mode 100644
index 00000000..05fe72c0
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/helpers.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+func ordLevel(l Level) int {
+	switch l {
+	case LevelMetadata:
+		return 1
+	case LevelRequest:
+		return 2
+	case LevelRequestResponse:
+		return 3
+	default:
+		return 0
+	}
+}
+
+func (a Level) Less(b Level) bool {
+	return ordLevel(a) < ordLevel(b)
+}
+
+func (a Level) GreaterOrEqual(b Level) bool {
+	return ordLevel(a) >= ordLevel(b)
+}
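The helpers above give audit levels a total order (None < Metadata < Request < RequestResponse via ordLevel), which callers use to decide whether an event captures enough detail to satisfy a given policy level. As a minimal illustrative sketch of that ordering, assuming the vendored audit package added by this patch is on the module path (the example program itself is not part of the patch):

    package main

    import (
    	"fmt"

    	"k8s.io/apiserver/pkg/apis/audit"
    )

    func main() {
    	// Metadata sorts below RequestResponse, so a RequestResponse event
    	// satisfies any rule that only requires Metadata.
    	fmt.Println(audit.LevelMetadata.Less(audit.LevelRequestResponse))          // true
    	fmt.Println(audit.LevelRequestResponse.GreaterOrEqual(audit.LevelRequest)) // true
    	// ordLevel maps LevelNone (and any unknown level) to 0, the minimum.
    	fmt.Println(audit.LevelNone.Less(audit.LevelMetadata)) // true
    }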
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/register.go b/vendor/k8s.io/apiserver/pkg/apis/audit/register.go
new file mode 100644
index 00000000..9abf739a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "audit.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	AddToScheme   = SchemeBuilder.AddToScheme
+)
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Event{},
+		&EventList{},
+		&Policy{},
+		&PolicyList{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go
new file mode 100644
index 00000000..17a398ed
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/types.go
@@ -0,0 +1,312 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	authnv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// Header keys used by the audit system.
+const (
+	// Header to hold the audit ID as the request is propagated through the serving hierarchy. The
+	// Audit-ID header should be set by the first server to receive the request (e.g. the federation
+	// server or kube-aggregator).
+	//
+	// The audit ID is also returned to the client via an HTTP response header.
+	// The Audit-ID header is not guaranteed to be sent for all requests: when kube-apiserver did not
+	// audit the request according to the audit policy, no Audit-ID is returned. Also, for requests to
+	// pods/exec, pods/attach, and pods/proxy, kube-apiserver works like a proxy and redirects the
+	// request to the kubelet node; users will only get the HTTP headers sent from the kubelet node,
+	// so no Audit-ID is sent when users run commands like "kubectl exec" or "kubectl attach".
+	HeaderAuditID = "Audit-ID"
+)
+
+// Level defines the amount of information logged during auditing
+type Level string
+
+// Valid audit levels
+const (
+	// LevelNone disables auditing
+	LevelNone Level = "None"
+	// LevelMetadata provides the basic level of auditing.
+	LevelMetadata Level = "Metadata"
+	// LevelRequest provides Metadata level of auditing, and additionally
+	// logs the request object (does not apply for non-resource requests).
+	LevelRequest Level = "Request"
+	// LevelRequestResponse provides Request level of auditing, and additionally
+	// logs the response object (does not apply for non-resource requests).
+	LevelRequestResponse Level = "RequestResponse"
+)
+
+// Stage defines the stages in request handling during which audit events may be generated.
+type Stage string
+
+// Valid audit stages.
+const (
+	// The stage for events generated as soon as the audit handler receives the request, and before it
+	// is delegated down the handler chain.
+	StageRequestReceived Stage = "RequestReceived"
+	// The stage for events generated once the response headers are sent, but before the response body
+	// is sent. This stage is only generated for long-running requests (e.g. watch).
+	StageResponseStarted Stage = "ResponseStarted"
+	// The stage for events generated once the response body has been completed, and no more bytes
+	// will be sent.
+	StageResponseComplete Stage = "ResponseComplete"
+	// The stage for events generated when a panic occurred.
+	StagePanic Stage = "Panic"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event captures all the information that can be included in an API audit log.
+type Event struct {
+	metav1.TypeMeta
+
+	// AuditLevel at which the event was generated
+	Level Level
+
+	// Unique audit ID, generated for each request.
+	AuditID types.UID
+	// Stage of the request handling when this event instance was generated.
+	Stage Stage
+
+	// RequestURI is the request URI as sent by the client to a server.
+	RequestURI string
+	// Verb is the Kubernetes verb associated with the request.
+	// For non-resource requests, this is the lower-cased HTTP method.
+	Verb string
+	// Authenticated user information.
+	User authnv1.UserInfo
+	// Impersonated user information.
+	// +optional
+	ImpersonatedUser *authnv1.UserInfo
+	// Source IPs, from where the request originated and intermediate proxies.
+	// The source IPs are listed from (in order):
+	// 1. X-Forwarded-For request header IPs
+	// 2. X-Real-Ip header, if not present in the X-Forwarded-For list
+	// 3. The remote address for the connection, if it doesn't match the last
+	//    IP in the list up to here (X-Forwarded-For or X-Real-Ip).
+	// Note: All but the last IP can be arbitrarily set by the client.
+	// +optional
+	SourceIPs []string
+	// UserAgent records the user agent string reported by the client.
+	// Note that the UserAgent is provided by the client, and must not be trusted.
+	// +optional
+	UserAgent string
+	// Object reference this request is targeted at.
+	// Does not apply for List-type requests, or non-resource requests.
+	// +optional
+	ObjectRef *ObjectReference
+	// The response status, populated even when the ResponseObject is not a Status type.
+	// For successful responses, this will only include the Code. For non-status type
+	// error responses, this will be auto-populated with the error Message.
+	// +optional
+	ResponseStatus *metav1.Status
+
+	// API object from the request, in JSON format. The RequestObject is recorded as-is in the request
+	// (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or
+	// merging. It is an external versioned object type, and may not be a valid object on its own.
+	// Omitted for non-resource requests. Only logged at Request Level and higher.
+	// +optional
+	RequestObject *runtime.Unknown
+	// API object returned in the response, in JSON. The ResponseObject is recorded after conversion
+	// to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged
+	// at Response Level.
+	// +optional
+	ResponseObject *runtime.Unknown
+
+	// Time the request reached the apiserver.
+	RequestReceivedTimestamp metav1.MicroTime
+	// Time the request reached current audit stage.
+	StageTimestamp metav1.MicroTime
+
+	// Annotations is an unstructured key value map stored with an audit event that may be set by
+	// plugins invoked in the request serving chain, including authentication, authorization and
+	// admission plugins. Note that these annotations are for the audit event, and do not correspond
+	// to the metadata.annotations of the submitted object. Keys should uniquely identify the informing
+	// component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values
+	// should be short. Annotations are included in the Metadata level.
+	// +optional
+	Annotations map[string]string
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventList is a list of audit Events.
+type EventList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Event
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Policy defines the configuration of audit logging, and the rules for how different request
+// categories are logged.
+type Policy struct {
+	metav1.TypeMeta
+	// ObjectMeta is included for interoperability with API infrastructure.
+	// +optional
+	metav1.ObjectMeta
+
+	// Rules specify the audit Level a request should be recorded at.
+	// A request may match multiple rules, in which case the FIRST matching rule is used.
+	// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
+	// PolicyRules are strictly ordered.
+	Rules []PolicyRule
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified per rule, in which case the union of both is omitted.
+	// +optional
+	OmitStages []Stage
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// This is used as a global default - a value of 'true' will omit the managed fields,
+	// otherwise the managed fields will be included in the API audit log.
+	// Note that this can also be specified per rule, in which case the value specified
+	// in a rule will override the global default.
+	// +optional
+	OmitManagedFields bool
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PolicyList is a list of audit Policies.
+type PolicyList struct {
+	metav1.TypeMeta
+	// +optional
+	metav1.ListMeta
+
+	Items []Policy
+}
+
+// PolicyRule maps requests based on metadata to an audit Level.
+// Requests must match the rules of every field (an intersection of rules).
+type PolicyRule struct {
+	// The Level that requests matching this rule are recorded at.
+	Level Level
+
+	// The users (by authenticated user name) this rule applies to.
+	// An empty list implies every user.
+	// +optional
+	Users []string
+	// The user groups this rule applies to. A user is considered matching
+	// if it is a member of any of the UserGroups.
+	// An empty list implies every user group.
+	// +optional
+	UserGroups []string
+
+	// The verbs that match this rule.
+	// An empty list implies every verb.
+	// +optional
+	Verbs []string
+
+	// Rules can apply to API resources (such as "pods" or "secrets"),
+	// non-resource URL paths (such as "/api"), or neither, but not both.
+	// If neither is specified, the rule is treated as a default for all URLs.
+
+	// Resources that this rule matches. An empty list implies all kinds in all API groups.
+	// +optional
+	Resources []GroupResources
+	// Namespaces that this rule matches.
+	// The empty string "" matches non-namespaced resources.
+	// An empty list implies every namespace.
+	// +optional
+	Namespaces []string
+
+	// NonResourceURLs is a set of URL paths that should be audited.
+	// `*`s are allowed, but only as the full, final step in the path.
+	// Examples:
+	//  `/metrics` - Log requests for apiserver metrics
+	//  `/healthz*` - Log all health checks
+	// +optional
+	NonResourceURLs []string
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified policy wide, in which case the union of both is omitted.
+	// An empty list means no restrictions will apply.
+	// +optional
+	OmitStages []Stage
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// - a value of 'true' will drop the managed fields from the API audit log
+	// - a value of 'false' indicates that the managed fields should be included
+	//   in the API audit log
+	// Note that the value, if specified in this rule, will override the global default.
+	// If a value is not specified then the global default specified in
+	// Policy.OmitManagedFields will stand.
+	// +optional
+	OmitManagedFields *bool
+}
+
+// GroupResources represents resource kinds in an API group.
+type GroupResources struct {
+	// Group is the name of the API group that contains the resources.
+	// The empty string represents the core API group.
+	// +optional
+	Group string
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// - `pods` matches pods.
+	// - `pods/log` matches the log subresource of pods.
+	// - `*` matches all resources and their subresources.
+	// - `pods/*` matches all subresources of pods.
+	// - `*/scale` matches all scale subresources.
+	//
+	// If a wildcard is present, the validation rule will ensure that resources do not
+	// overlap with each other.
+	//
+	// An empty list implies all resources and subresources in this API group apply.
+	// +optional
+	Resources []string
+	// ResourceNames is a list of resource instance names that the policy matches.
+	// Using this field requires Resources to be specified.
+	// An empty list implies that every instance of the resource is matched.
+	// +optional
+	ResourceNames []string
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+	// +optional
+	Resource string
+	// +optional
+	Namespace string
+	// +optional
+	Name string
+	// +optional
+	UID types.UID
+	// APIGroup is the name of the API group that contains the referred object.
+	// The empty string represents the core API group.
+	// +optional
+	APIGroup string
+	// APIVersion is the version of the API group that contains the referred object.
+	// +optional
+	APIVersion string
+	// +optional
+	ResourceVersion string
+	// +optional
+	Subresource string
+}
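The Policy and PolicyRule types above encode the matching semantics spelled out in their comments: rules are evaluated in order with the first match winning, and within a single rule every populated field must match (an intersection). As a minimal illustrative sketch, a policy that ignores core-group secrets but records everything else at Metadata level (the example program itself is not part of the patch):

    package main

    import (
    	"fmt"

    	"k8s.io/apiserver/pkg/apis/audit"
    )

    func main() {
    	policy := audit.Policy{
    		Rules: []audit.PolicyRule{
    			// Listed first so it wins over the catch-all rule below.
    			{
    				Level: audit.LevelNone,
    				Resources: []audit.GroupResources{
    					{Group: "", Resources: []string{"secrets"}}, // "" is the core API group
    				},
    			},
    			// Catch-all: a rule with no selectors matches every request.
    			{Level: audit.LevelMetadata},
    		},
    		// Stages listed here produce no events, for all rules.
    		OmitStages: []audit.Stage{audit.StageRequestReceived},
    	}
    	fmt.Printf("rules=%d fallbackLevel=%s\n", len(policy.Rules), policy.Rules[1].Level)
    }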
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go
new file mode 100644
index 00000000..d1f180c9
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/doc.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/audit
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=audit.k8s.io
+
+package v1 // import "k8s.io/apiserver/pkg/apis/audit/v1"
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
new file mode 100644
index 00000000..27dab8c0
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.pb.go
@@ -0,0 +1,3230 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/apiserver/pkg/apis/audit/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	v1 "k8s.io/api/authentication/v1"
+	v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_62937bb89ca7b6dd, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_62937bb89ca7b6dd, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo + +func (m *GroupResources) Reset() { *m = GroupResources{} } +func (*GroupResources) ProtoMessage() {} +func (*GroupResources) Descriptor() ([]byte, []int) { + return fileDescriptor_62937bb89ca7b6dd, []int{2} +} +func (m *GroupResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResources.Merge(m, src) +} +func (m *GroupResources) XXX_Size() int { + return m.Size() +} +func (m *GroupResources) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResources.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupResources proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_62937bb89ca7b6dd, []int{3} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_62937bb89ca7b6dd, []int{4} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *PolicyList) Reset() { *m = PolicyList{} } +func (*PolicyList) ProtoMessage() {} +func (*PolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_62937bb89ca7b6dd, []int{5} +} +func (m *PolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyList.Merge(m, src) +} +func (m *PolicyList) XXX_Size() int { + return m.Size() +} +func (m *PolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyList proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_62937bb89ca7b6dd, []int{6} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Event") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Event.AnnotationsEntry") + proto.RegisterType((*EventList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.EventList") + proto.RegisterType((*GroupResources)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.GroupResources") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.ObjectReference") + proto.RegisterType((*Policy)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.Policy") + proto.RegisterType((*PolicyList)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.PolicyList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.apiserver.pkg.apis.audit.v1.PolicyRule") +} + +func init() { + proto.RegisterFile("k8s.io/apiserver/pkg/apis/audit/v1/generated.proto", fileDescriptor_62937bb89ca7b6dd) +} + +var fileDescriptor_62937bb89ca7b6dd = []byte{ + // 1275 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xcf, 0x6f, 0x1b, 0xd5, + 0x13, 0xcf, 0xc6, 0x71, 0x63, 0x8f, 0x1b, 0xc7, 0x79, 0xed, 0xf7, 0xdb, 0x25, 0x07, 0xdb, 0x18, + 0x09, 0x05, 0x08, 0xeb, 0xd6, 0x14, 0x5a, 0x55, 0x02, 0xc9, 0xa6, 0xa5, 0xb5, 0xd4, 0xa6, 0xd1, + 0x33, 0xee, 0x01, 0x71, 0xe8, 0x7a, 0x3d, 0xb5, 0x97, 0xd8, 0xbb, 0xdb, 0x7d, 0x6f, 0x8d, 0x72, + 0xe3, 0x1f, 0x40, 0xe2, 0xce, 0x7f, 0xc1, 0x0d, 0x71, 0xe2, 0x96, 0x63, 0x8f, 0x3d, 0x59, 0xc4, + 0xf0, 0x57, 
0xe4, 0x80, 0xd0, 0x7b, 0xfb, 0xf6, 0x87, 0x9d, 0x58, 0x71, 0x38, 0x70, 0xf3, 0x9b, + 0xf9, 0x7c, 0x3e, 0x33, 0x3b, 0x3b, 0x33, 0x6f, 0x0d, 0x8d, 0xa3, 0xfb, 0xcc, 0xb0, 0xdd, 0xba, + 0xe9, 0xd9, 0x0c, 0xfd, 0x09, 0xfa, 0x75, 0xef, 0x68, 0x20, 0x4f, 0x75, 0x33, 0xe8, 0xdb, 0xbc, + 0x3e, 0xb9, 0x53, 0x1f, 0xa0, 0x83, 0xbe, 0xc9, 0xb1, 0x6f, 0x78, 0xbe, 0xcb, 0x5d, 0x52, 0x0b, + 0x39, 0x46, 0xcc, 0x31, 0xbc, 0xa3, 0x81, 0x3c, 0x19, 0x92, 0x63, 0x4c, 0xee, 0xec, 0x7e, 0x3c, + 0xb0, 0xf9, 0x30, 0xe8, 0x19, 0x96, 0x3b, 0xae, 0x0f, 0xdc, 0x81, 0x5b, 0x97, 0xd4, 0x5e, 0xf0, + 0x4a, 0x9e, 0xe4, 0x41, 0xfe, 0x0a, 0x25, 0x77, 0xf7, 0x93, 0x34, 0xea, 0x66, 0xc0, 0x87, 0xe8, + 0x70, 0xdb, 0x32, 0xb9, 0xed, 0x3a, 0x17, 0x24, 0xb0, 0x7b, 0x37, 0x41, 0x8f, 0x4d, 0x6b, 0x68, + 0x3b, 0xe8, 0x1f, 0x27, 0x79, 0x8f, 0x91, 0x9b, 0x17, 0xb1, 0xea, 0xcb, 0x58, 0x7e, 0xe0, 0x70, + 0x7b, 0x8c, 0xe7, 0x08, 0x9f, 0x5d, 0x46, 0x60, 0xd6, 0x10, 0xc7, 0xe6, 0x22, 0xaf, 0xf6, 0x17, + 0x40, 0xf6, 0xd1, 0x04, 0x1d, 0x4e, 0xf6, 0x21, 0x3b, 0xc2, 0x09, 0x8e, 0x74, 0xad, 0xaa, 0xed, + 0xe5, 0x5b, 0xff, 0x3f, 0x99, 0x56, 0xd6, 0x66, 0xd3, 0x4a, 0xf6, 0xa9, 0x30, 0x9e, 0x45, 0x3f, + 0x68, 0x08, 0x22, 0x07, 0xb0, 0x29, 0xeb, 0xd7, 0x7e, 0xa8, 0xaf, 0x4b, 0xfc, 0x5d, 0x85, 0xdf, + 0x6c, 0x86, 0xe6, 0xb3, 0x69, 0xe5, 0xdd, 0x65, 0x39, 0xf1, 0x63, 0x0f, 0x99, 0xd1, 0x6d, 0x3f, + 0xa4, 0x91, 0x88, 0x88, 0xce, 0xb8, 0x39, 0x40, 0x3d, 0x33, 0x1f, 0xbd, 0x23, 0x8c, 0x67, 0xd1, + 0x0f, 0x1a, 0x82, 0x48, 0x03, 0xc0, 0xc7, 0xd7, 0x01, 0x32, 0xde, 0xa5, 0x6d, 0x7d, 0x43, 0x52, + 0x88, 0xa2, 0x00, 0x8d, 0x3d, 0x34, 0x85, 0x22, 0x55, 0xd8, 0x98, 0xa0, 0xdf, 0xd3, 0xb3, 0x12, + 0x7d, 0x5d, 0xa1, 0x37, 0x5e, 0xa0, 0xdf, 0xa3, 0xd2, 0x43, 0x9e, 0xc0, 0x46, 0xc0, 0xd0, 0xd7, + 0xaf, 0x55, 0xb5, 0xbd, 0x42, 0xe3, 0x7d, 0x23, 0x69, 0x1d, 0x63, 0xfe, 0x3d, 0x1b, 0x93, 0x3b, + 0x46, 0x97, 0xa1, 0xdf, 0x76, 0x5e, 0xb9, 0x89, 0x92, 0xb0, 0x50, 0xa9, 0x40, 0x86, 0x50, 0xb2, + 0xc7, 0x1e, 0xfa, 0xcc, 0x75, 0x44, 0xad, 0x85, 0x47, 0xdf, 0xbc, 0x92, 0xea, 0xcd, 0xd9, 0xb4, + 0x52, 0x6a, 0x2f, 0x68, 0xd0, 0x73, 0xaa, 0xe4, 0x23, 0xc8, 0x33, 0x37, 0xf0, 0x2d, 0x6c, 0x1f, + 0x32, 0x3d, 0x57, 0xcd, 0xec, 0xe5, 0x5b, 0x5b, 0xb3, 0x69, 0x25, 0xdf, 0x89, 0x8c, 0x34, 0xf1, + 0x93, 0x3a, 0xe4, 0x45, 0x7a, 0xcd, 0x01, 0x3a, 0x5c, 0x2f, 0xc9, 0x3a, 0xec, 0xa8, 0xec, 0xf3, + 0xdd, 0xc8, 0x41, 0x13, 0x0c, 0x79, 0x09, 0x79, 0xb7, 0xf7, 0x1d, 0x5a, 0x9c, 0xe2, 0x2b, 0x3d, + 0x2f, 0x1f, 0xe0, 0x13, 0xe3, 0xf2, 0x89, 0x32, 0x9e, 0x47, 0x24, 0xf4, 0xd1, 0xb1, 0x30, 0x4c, + 0x29, 0x36, 0xd2, 0x44, 0x94, 0x0c, 0xa1, 0xe8, 0x23, 0xf3, 0x5c, 0x87, 0x61, 0x87, 0x9b, 0x3c, + 0x60, 0x3a, 0xc8, 0x30, 0xfb, 0xa9, 0x30, 0x71, 0xf3, 0x24, 0x91, 0xc4, 0xdc, 0x88, 0x40, 0x21, + 0xa7, 0x45, 0x66, 0xd3, 0x4a, 0x91, 0xce, 0xe9, 0xd0, 0x05, 0x5d, 0x62, 0xc2, 0x96, 0xea, 0x86, + 0x30, 0x11, 0xbd, 0x20, 0x03, 0xed, 0x2d, 0x0d, 0xa4, 0x26, 0xc7, 0xe8, 0x3a, 0x47, 0x8e, 0xfb, + 0xbd, 0xd3, 0xda, 0x99, 0x4d, 0x2b, 0x5b, 0x34, 0x2d, 0x41, 0xe7, 0x15, 0x49, 0x3f, 0x79, 0x18, + 0x15, 0xe3, 0xfa, 0x15, 0x63, 0xcc, 0x3d, 0x88, 0x0a, 0xb2, 0xa0, 0x49, 0x7e, 0xd4, 0x40, 0x57, + 0x71, 0x29, 0x5a, 0x68, 0x4f, 0xb0, 0xff, 0xb5, 0x3d, 0x46, 0xc6, 0xcd, 0xb1, 0xa7, 0x6f, 0xc9, + 0x80, 0xf5, 0xd5, 0xaa, 0xf7, 0xcc, 0xb6, 0x7c, 0x57, 0x70, 0x5b, 0x55, 0xd5, 0x06, 0x3a, 0x5d, + 0x22, 0x4c, 0x97, 0x86, 0x24, 0x2e, 0x14, 0xe5, 0x54, 0x26, 0x49, 0x14, 0xff, 0x5d, 0x12, 0xd1, + 0xd0, 0x17, 0x3b, 0x73, 0x72, 0x74, 0x41, 0x9e, 0xbc, 0x86, 0x82, 0xe9, 0x38, 0x2e, 0x97, 0x53, + 0xc3, 0xf4, 0xed, 0x6a, 0x66, 0xaf, 
0xd0, 0x78, 0xb0, 0x4a, 0x5f, 0xca, 0x4d, 0x67, 0x34, 0x13, + 0xf2, 0x23, 0x87, 0xfb, 0xc7, 0xad, 0x1b, 0x2a, 0x70, 0x21, 0xe5, 0xa1, 0xe9, 0x18, 0xbb, 0x5f, + 0x40, 0x69, 0x91, 0x45, 0x4a, 0x90, 0x39, 0xc2, 0xe3, 0x70, 0x5d, 0x52, 0xf1, 0x93, 0xdc, 0x84, + 0xec, 0xc4, 0x1c, 0x05, 0x18, 0xae, 0x44, 0x1a, 0x1e, 0x1e, 0xac, 0xdf, 0xd7, 0x6a, 0xbf, 0x6a, + 0x90, 0x97, 0xc1, 0x9f, 0xda, 0x8c, 0x93, 0x6f, 0x21, 0x27, 0x9e, 0xbe, 0x6f, 0x72, 0x53, 0xd2, + 0x0b, 0x0d, 0x63, 0xb5, 0x5a, 0x09, 0xf6, 0x33, 0xe4, 0x66, 0xab, 0xa4, 0x32, 0xce, 0x45, 0x16, + 0x1a, 0x2b, 0x92, 0x03, 0xc8, 0xda, 0x1c, 0xc7, 0x4c, 0x5f, 0x97, 0x85, 0xf9, 0x60, 0xe5, 0xc2, + 0xb4, 0xb6, 0xa2, 0xad, 0xdb, 0x16, 0x7c, 0x1a, 0xca, 0xd4, 0x7e, 0xd6, 0xa0, 0xf8, 0xd8, 0x77, + 0x03, 0x8f, 0x62, 0xb8, 0x4a, 0x18, 0x79, 0x0f, 0xb2, 0x03, 0x61, 0x51, 0x77, 0x45, 0xcc, 0x0b, + 0x61, 0xa1, 0x4f, 0xac, 0x26, 0x3f, 0x62, 0xc8, 0x5c, 0xd4, 0x6a, 0x8a, 0x65, 0x68, 0xe2, 0x27, + 0xf7, 0xc4, 0x74, 0x86, 0x87, 0x03, 0x73, 0x8c, 0x4c, 0xcf, 0x48, 0x82, 0x9a, 0xb9, 0x94, 0x83, + 0xce, 0xe3, 0x6a, 0xbf, 0x64, 0x60, 0x7b, 0x61, 0xdd, 0x90, 0x7d, 0xc8, 0x45, 0x20, 0x95, 0x61, + 0x5c, 0xaf, 0x48, 0x8b, 0xc6, 0x08, 0xb1, 0x15, 0x1d, 0x21, 0xe5, 0x99, 0x96, 0x7a, 0x73, 0xc9, + 0x56, 0x3c, 0x88, 0x1c, 0x34, 0xc1, 0x88, 0x9b, 0x44, 0x1c, 0xd4, 0x55, 0x15, 0xef, 0x7f, 0x81, + 0xa5, 0xd2, 0x43, 0x5a, 0x90, 0x09, 0xec, 0xbe, 0xba, 0x98, 0x6e, 0x2b, 0x40, 0xa6, 0xbb, 0xea, + 0xad, 0x28, 0xc8, 0xe2, 0x21, 0x4c, 0xcf, 0x96, 0x15, 0x55, 0x77, 0x56, 0xfc, 0x10, 0xcd, 0xc3, + 0x76, 0x58, 0xe9, 0x18, 0x21, 0x6e, 0x44, 0xd3, 0xb3, 0x5f, 0xa0, 0xcf, 0x6c, 0xd7, 0x91, 0x37, + 0x58, 0xea, 0x46, 0x6c, 0x1e, 0xb6, 0x95, 0x87, 0xa6, 0x50, 0xa4, 0x09, 0xdb, 0x51, 0x11, 0x22, + 0xe2, 0xa6, 0x24, 0xde, 0x52, 0xc4, 0x6d, 0x3a, 0xef, 0xa6, 0x8b, 0x78, 0xf2, 0x29, 0x14, 0x58, + 0xd0, 0x8b, 0x8b, 0x9d, 0x93, 0xf4, 0x78, 0x9c, 0x3a, 0x89, 0x8b, 0xa6, 0x71, 0xb5, 0xdf, 0xd7, + 0xe1, 0xda, 0xa1, 0x3b, 0xb2, 0xad, 0x63, 0xf2, 0xf2, 0xdc, 0x2c, 0xdc, 0x5e, 0x6d, 0x16, 0xc2, + 0x97, 0x2e, 0xa7, 0x21, 0x7e, 0xd0, 0xc4, 0x96, 0x9a, 0x87, 0x0e, 0x64, 0xfd, 0x60, 0x84, 0xd1, + 0x3c, 0x18, 0xab, 0xcc, 0x43, 0x98, 0x1c, 0x0d, 0x46, 0x98, 0x34, 0xb7, 0x38, 0x31, 0x1a, 0x6a, + 0x91, 0x7b, 0x00, 0xee, 0xd8, 0xe6, 0x72, 0x53, 0x45, 0xcd, 0x7a, 0x4b, 0xa6, 0x10, 0x5b, 0x93, + 0xaf, 0x96, 0x14, 0x94, 0x3c, 0x86, 0x1d, 0x71, 0x7a, 0x66, 0x3a, 0xe6, 0x00, 0xfb, 0x5f, 0xd9, + 0x38, 0xea, 0x33, 0xd9, 0x28, 0xb9, 0xd6, 0x3b, 0x2a, 0xd2, 0xce, 0xf3, 0x45, 0x00, 0x3d, 0xcf, + 0xa9, 0xfd, 0xa6, 0x01, 0x84, 0x69, 0xfe, 0x07, 0x3b, 0xe5, 0xf9, 0xfc, 0x4e, 0xf9, 0x70, 0xf5, + 0x1a, 0x2e, 0x59, 0x2a, 0x7f, 0x67, 0xa2, 0xec, 0x45, 0x59, 0xaf, 0xf8, 0xf1, 0x59, 0x81, 0xac, + 0xf8, 0x46, 0x89, 0xb6, 0x4a, 0x5e, 0x20, 0xc5, 0xf7, 0x0b, 0xa3, 0xa1, 0x9d, 0x18, 0x00, 0xe2, + 0x87, 0x1c, 0x8d, 0xe8, 0xed, 0x14, 0xc5, 0xdb, 0xe9, 0xc6, 0x56, 0x9a, 0x42, 0x08, 0x41, 0xf1, + 0x05, 0x28, 0x5e, 0x44, 0x2c, 0x28, 0x3e, 0x0c, 0x19, 0x0d, 0xed, 0xc4, 0x4a, 0xef, 0xb2, 0xac, + 0xac, 0x41, 0x63, 0x95, 0x1a, 0xcc, 0xef, 0xcd, 0x64, 0xaf, 0x5c, 0xb8, 0x03, 0x0d, 0x80, 0x78, + 0xc9, 0x30, 0xfd, 0x5a, 0x92, 0x75, 0xbc, 0x85, 0x18, 0x4d, 0x21, 0xc8, 0xe7, 0xb0, 0xed, 0xb8, + 0x4e, 0x24, 0xd5, 0xa5, 0x4f, 0x99, 0xbe, 0x29, 0x49, 0x37, 0xc4, 0xec, 0x1e, 0xcc, 0xbb, 0xe8, + 0x22, 0x76, 0xa1, 0x85, 0x73, 0xab, 0xb7, 0xf0, 0x97, 0x17, 0xb5, 0x70, 0x5e, 0xb6, 0xf0, 0xff, + 0x56, 0x6d, 0xdf, 0xd6, 0x93, 0x93, 0xd3, 0xf2, 0xda, 0x9b, 0xd3, 0xf2, 0xda, 0xdb, 0xd3, 0xf2, + 0xda, 0x0f, 0xb3, 0xb2, 0x76, 0x32, 0x2b, 0x6b, 0x6f, 0x66, 
0x65, 0xed, 0xed, 0xac, 0xac, 0xfd, + 0x31, 0x2b, 0x6b, 0x3f, 0xfd, 0x59, 0x5e, 0xfb, 0xa6, 0x76, 0xf9, 0x5f, 0xbe, 0x7f, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x81, 0x06, 0x4f, 0x58, 0x17, 0x0e, 0x00, 0x00, +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UserAgent) + copy(dAtA[i:], m.UserAgent) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserAgent))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a + } + } + { + size, err := m.StageTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + { + size, err := m.RequestReceivedTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + if m.ResponseObject != nil { + { + size, err := m.ResponseObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.RequestObject != nil { + { + size, err := m.RequestObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ResponseStatus != nil { + { + size, err := m.ResponseStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.ObjectRef != nil { + { + size, err := m.ObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.SourceIPs) > 0 { + for iNdEx := len(m.SourceIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SourceIPs[iNdEx]) + copy(dAtA[i:], m.SourceIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourceIPs[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.ImpersonatedUser != nil { + { + size, err := m.ImpersonatedUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x32 + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x2a + i -= len(m.RequestURI) + copy(dAtA[i:], m.RequestURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequestURI))) + i-- + dAtA[i] = 0x22 + i -= len(m.Stage) + copy(dAtA[i:], m.Stage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stage))) + i-- + dAtA[i] = 0x1a + i -= len(m.AuditID) + copy(dAtA[i:], m.AuditID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuditID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x42 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x3a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OmitManagedFields != nil { + i-- + if *m.OmitManagedFields { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + if len(m.OmitStages) > 0 { + for iNdEx := len(m.OmitStages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.OmitStages[iNdEx]) + copy(dAtA[i:], m.OmitStages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OmitStages[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.UserGroups) > 0 { + for iNdEx := len(m.UserGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.UserGroups[iNdEx]) + copy(dAtA[i:], m.UserGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuditID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Stage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RequestURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.ImpersonatedUser != nil { + l = m.ImpersonatedUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.SourceIPs) > 0 { + for _, s := range m.SourceIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ObjectRef != nil { + l = m.ObjectRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseStatus != nil { + l = m.ResponseStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RequestObject != nil { + l = m.RequestObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResponseObject != nil { + l = m.ResponseObject.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + l = m.RequestReceivedTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.StageTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UserAgent) + n += 2 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GroupResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + return n +} + +func (m *PolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.UserGroups) > 0 { + for _, s := range m.UserGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.OmitStages) > 0 { + for _, s := range m.OmitStages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.OmitManagedFields != nil 
{ + n += 2 + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&Event{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `AuditID:` + fmt.Sprintf("%v", this.AuditID) + `,`, + `Stage:` + fmt.Sprintf("%v", this.Stage) + `,`, + `RequestURI:` + fmt.Sprintf("%v", this.RequestURI) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "UserInfo", "v1.UserInfo", 1), `&`, ``, 1) + `,`, + `ImpersonatedUser:` + strings.Replace(fmt.Sprintf("%v", this.ImpersonatedUser), "UserInfo", "v1.UserInfo", 1) + `,`, + `SourceIPs:` + fmt.Sprintf("%v", this.SourceIPs) + `,`, + `ObjectRef:` + strings.Replace(this.ObjectRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ResponseStatus:` + strings.Replace(fmt.Sprintf("%v", this.ResponseStatus), "Status", "v11.Status", 1) + `,`, + `RequestObject:` + strings.Replace(fmt.Sprintf("%v", this.RequestObject), "Unknown", "runtime.Unknown", 1) + `,`, + `ResponseObject:` + strings.Replace(fmt.Sprintf("%v", this.ResponseObject), "Unknown", "runtime.Unknown", 1) + `,`, + `RequestReceivedTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequestReceivedTimestamp), "MicroTime", "v11.MicroTime", 1), `&`, ``, 1) + `,`, + `StageTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StageTimestamp), "MicroTime", "v11.MicroTime", 1), `&`, ``, 1) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `UserAgent:` + fmt.Sprintf("%v", this.UserAgent) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *GroupResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupResources{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `APIVersion:` + fmt.Sprintf("%v", 
this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Policy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + fmt.Sprintf("%v", this.OmitManagedFields) + `,`, + `}`, + }, "") + return s +} +func (this *PolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Policy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Policy", "Policy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForResources := "[]GroupResources{" + for _, f := range this.Resources { + repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "GroupResources", "GroupResources", 1), `&`, ``, 1) + "," + } + repeatedStringForResources += "}" + s := strings.Join([]string{`&PolicyRule{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `UserGroups:` + fmt.Sprintf("%v", this.UserGroups) + `,`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `Resources:` + repeatedStringForResources + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`, + `OmitManagedFields:` + valueToStringGenerated(this.OmitManagedFields) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuditID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stage = Stage(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImpersonatedUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImpersonatedUser == nil { + m.ImpersonatedUser = &v1.UserInfo{} + } + if err := m.ImpersonatedUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SourceIPs = append(m.SourceIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectRef == nil { + m.ObjectRef = &ObjectReference{} + } + if err := m.ObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseStatus == nil { + m.ResponseStatus = &v11.Status{} + } + if err := m.ResponseStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RequestObject == nil { + m.RequestObject = &runtime.Unknown{} + } + if err := m.RequestObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResponseObject == nil { + m.ResponseObject = &runtime.Unknown{} + } + if err := m.ResponseObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestReceivedTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RequestReceivedTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StageTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StageTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserAgent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserAgent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.OmitManagedFields = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Policy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserGroups = append(m.UserGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, GroupResources{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OmitManagedFields", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OmitManagedFields = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := 
len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto new file mode 100644 index 00000000..032c5869 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/generated.proto @@ -0,0 +1,287 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apiserver.pkg.apis.audit.v1; + +import "k8s.io/api/authentication/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/apiserver/pkg/apis/audit/v1"; + +// Event captures all the information that can be included in an API audit log. +message Event { + // AuditLevel at which event was generated + optional string level = 1; + + // Unique audit ID, generated for each request. + optional string auditID = 2; + + // Stage of the request handling when this event instance was generated. + optional string stage = 3; + + // RequestURI is the request URI as sent by the client to a server. + optional string requestURI = 4; + + // Verb is the kubernetes verb associated with the request. + // For non-resource requests, this is the lower-cased HTTP method. 
+ optional string verb = 5; + + // Authenticated user information. + optional .k8s.io.api.authentication.v1.UserInfo user = 6; + + // Impersonated user information. + // +optional + optional .k8s.io.api.authentication.v1.UserInfo impersonatedUser = 7; + + // Source IPs, from where the request originated and intermediate proxies. + // The source IPs are listed from (in order): + // 1. X-Forwarded-For request header IPs + // 2. X-Real-Ip header, if not present in the X-Forwarded-For list + // 3. The remote address for the connection, if it doesn't match the last + // IP in the list up to here (X-Forwarded-For or X-Real-Ip). + // Note: All but the last IP can be arbitrarily set by the client. + // +optional + // +listType=atomic + repeated string sourceIPs = 8; + + // UserAgent records the user agent string reported by the client. + // Note that the UserAgent is provided by the client, and must not be trusted. + // +optional + optional string userAgent = 16; + + // Object reference this request is targeted at. + // Does not apply for List-type requests, or non-resource requests. + // +optional + optional ObjectReference objectRef = 9; + + // The response status, populated even when the ResponseObject is not a Status type. + // For successful responses, this will only include the Code and StatusSuccess. + // For non-status type error responses, this will be auto-populated with the error Message. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Status responseStatus = 10; + + // API object from the request, in JSON format. The RequestObject is recorded as-is in the request + // (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or + // merging. It is an external versioned object type, and may not be a valid object on its own. + // Omitted for non-resource requests. Only logged at Request Level and higher. + // +optional + optional .k8s.io.apimachinery.pkg.runtime.Unknown requestObject = 11; + + // API object returned in the response, in JSON. The ResponseObject is recorded after conversion + // to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged + // at Response Level. + // +optional + optional .k8s.io.apimachinery.pkg.runtime.Unknown responseObject = 12; + + // Time the request reached the apiserver. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime requestReceivedTimestamp = 13; + + // Time the request reached current audit stage. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime stageTimestamp = 14; + + // Annotations is an unstructured key value map stored with an audit event that may be set by + // plugins invoked in the request serving chain, including authentication, authorization and + // admission plugins. Note that these annotations are for the audit event, and do not correspond + // to the metadata.annotations of the submitted object. Keys should uniquely identify the informing + // component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values + // should be short. Annotations are included in the Metadata level. + // +optional + map<string, string> annotations = 15; +} + +// EventList is a list of audit Events. +message EventList { + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Event items = 2; +} + +// GroupResources represents resource kinds in an API group. +message GroupResources { + // Group is the name of the API group that contains the resources.
+ // The empty string represents the core API group. + // +optional + optional string group = 1; + + // Resources is a list of resources this rule applies to. + // + // For example: + // - `pods` matches pods. + // - `pods/log` matches the log subresource of pods. + // - `*` matches all resources and their subresources. + // - `pods/*` matches all subresources of pods. + // - `*/scale` matches all scale subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // An empty list implies that all resources and subresources in this API group apply. + // +optional + // +listType=atomic + repeated string resources = 2; + + // ResourceNames is a list of resource instance names that the policy matches. + // Using this field requires Resources to be specified. + // An empty list implies that every instance of the resource is matched. + // +optional + // +listType=atomic + repeated string resourceNames = 3; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +message ObjectReference { + // +optional + optional string resource = 1; + + // +optional + optional string namespace = 2; + + // +optional + optional string name = 3; + + // +optional + optional string uid = 4; + + // APIGroup is the name of the API group that contains the referred object. + // The empty string represents the core API group. + // +optional + optional string apiGroup = 5; + + // APIVersion is the version of the API group that contains the referred object. + // +optional + optional string apiVersion = 6; + + // +optional + optional string resourceVersion = 7; + + // +optional + optional string subresource = 8; +} + +// Policy defines the configuration of audit logging, and the rules for how different request +// categories are logged. +message Policy { + // ObjectMeta is included for interoperability with API infrastructure. + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules specify the audit Level a request should be recorded at. + // A request may match multiple rules, in which case the FIRST matching rule is used. + // The default audit level is None, but can be overridden by a catch-all rule at the end of the list. + // PolicyRules are strictly ordered. + // +listType=atomic + repeated PolicyRule rules = 2; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified per rule, in which case the union of both is omitted. + // +optional + // +listType=atomic + repeated string omitStages = 3; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // This is used as a global default - a value of 'true' will omit the managed fields, + // otherwise the managed fields will be included in the API audit log. + // Note that this can also be specified per rule, in which case the value specified + // in a rule will override the global default. + // +optional + optional bool omitManagedFields = 4; +} + +// PolicyList is a list of audit Policies. +message PolicyList { + // +optional + optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Policy items = 2; +} + +// PolicyRule maps requests based on metadata to an audit Level. +// Requests must match the rules of every field (an intersection of rules). +message PolicyRule { + // The Level that requests matching this rule are recorded at.
+ optional string level = 1; + + // The users (by authenticated user name) this rule applies to. + // An empty list implies every user. + // +optional + // +listType=atomic + repeated string users = 2; + + // The user groups this rule applies to. A user is considered matching + // if it is a member of any of the UserGroups. + // An empty list implies every user group. + // +optional + // +listType=atomic + repeated string userGroups = 3; + + // The verbs that match this rule. + // An empty list implies every verb. + // +optional + // +listType=atomic + repeated string verbs = 4; + + // Resources that this rule matches. An empty list implies all kinds in all API groups. + // +optional + // +listType=atomic + repeated GroupResources resources = 5; + + // Namespaces that this rule matches. + // The empty string "" matches non-namespaced resources. + // An empty list implies every namespace. + // +optional + // +listType=atomic + repeated string namespaces = 6; + + // NonResourceURLs is a set of URL paths that should be audited. + // `*`s are allowed, but only as the full, final step in the path. + // Examples: + // - `/metrics` - Log requests for apiserver metrics + // - `/healthz*` - Log all health checks + // +optional + // +listType=atomic + repeated string nonResourceURLs = 7; + + // OmitStages is a list of stages for which no events are created. Note that this can also + // be specified policy-wide, in which case the union of both is omitted. + // An empty list means no restrictions will apply. + // +optional + // +listType=atomic + repeated string omitStages = 8; + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. + // - a value of 'true' will drop the managed fields from the API audit log + // - a value of 'false' indicates that the managed fields should be included + // in the API audit log + // Note that the value, if specified in this rule, will override the global default. + // If a value is not specified, then the global default specified in + // Policy.OmitManagedFields will stand. + // +optional + optional bool omitManagedFields = 9; +} + diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go new file mode 100644 index 00000000..46e3e47b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "audit.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Event{},
+		&EventList{},
+		&Policy{},
+		&PolicyList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go
new file mode 100644
index 00000000..ae122d6c
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/types.go
@@ -0,0 +1,318 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	authnv1 "k8s.io/api/authentication/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// Header keys used by the audit system.
+const (
+	// Header to hold the audit ID as the request is propagated through the serving hierarchy. The
+	// Audit-ID header should be set by the first server to receive the request (e.g. the federation
+	// server or kube-aggregator).
+	HeaderAuditID = "Audit-ID"
+)
+
+// Level defines the amount of information logged during auditing
+type Level string
+
+// Valid audit levels
+const (
+	// LevelNone disables auditing
+	LevelNone Level = "None"
+	// LevelMetadata provides the basic level of auditing.
+	LevelMetadata Level = "Metadata"
+	// LevelRequest provides Metadata level of auditing, and additionally
+	// logs the request object (does not apply for non-resource requests).
+	LevelRequest Level = "Request"
+	// LevelRequestResponse provides Request level of auditing, and additionally
+	// logs the response object (does not apply for non-resource requests).
+	LevelRequestResponse Level = "RequestResponse"
+)
+
+// Stage defines the stages in request handling at which audit events may be generated.
+type Stage string
+
+// Valid audit stages.
+const (
+	// The stage for events generated as soon as the audit handler receives the request, and before it
+	// is delegated down the handler chain.
+	StageRequestReceived Stage = "RequestReceived"
+	// The stage for events generated once the response headers are sent, but before the response body
+	// is sent. This stage is only generated for long-running requests (e.g. watch).
+	StageResponseStarted Stage = "ResponseStarted"
+	// The stage for events generated once the response body has been completed, and no more bytes
+	// will be sent.
+	StageResponseComplete Stage = "ResponseComplete"
+	// The stage for events generated when a panic occurred.
+	StagePanic Stage = "Panic"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event captures all the information that can be included in an API audit log.
+type Event struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// AuditLevel at which the event was generated
+	Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"`
+
+	// Unique audit ID, generated for each request.
+	AuditID types.UID `json:"auditID" protobuf:"bytes,2,opt,name=auditID,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// Stage of the request handling when this event instance was generated.
+	Stage Stage `json:"stage" protobuf:"bytes,3,opt,name=stage,casttype=Stage"`
+
+	// RequestURI is the request URI as sent by the client to a server.
+	RequestURI string `json:"requestURI" protobuf:"bytes,4,opt,name=requestURI"`
+	// Verb is the kubernetes verb associated with the request.
+	// For non-resource requests, this is the lower-cased HTTP method.
+	Verb string `json:"verb" protobuf:"bytes,5,opt,name=verb"`
+	// Authenticated user information.
+	User authnv1.UserInfo `json:"user" protobuf:"bytes,6,opt,name=user"`
+	// Impersonated user information.
+	// +optional
+	ImpersonatedUser *authnv1.UserInfo `json:"impersonatedUser,omitempty" protobuf:"bytes,7,opt,name=impersonatedUser"`
+	// Source IPs, from where the request originated and intermediate proxies.
+	// The source IPs are listed from (in order):
+	// 1. X-Forwarded-For request header IPs
+	// 2. X-Real-Ip header, if not present in the X-Forwarded-For list
+	// 3. The remote address for the connection, if it doesn't match the last
+	//    IP in the list up to here (X-Forwarded-For or X-Real-Ip).
+	// Note: All but the last IP can be arbitrarily set by the client.
+	// +optional
+	// +listType=atomic
+	SourceIPs []string `json:"sourceIPs,omitempty" protobuf:"bytes,8,rep,name=sourceIPs"`
+	// UserAgent records the user agent string reported by the client.
+	// Note that the UserAgent is provided by the client, and must not be trusted.
+	// +optional
+	UserAgent string `json:"userAgent,omitempty" protobuf:"bytes,16,opt,name=userAgent"`
+	// Object reference this request is targeted at.
+	// Does not apply for List-type requests, or non-resource requests.
+	// +optional
+	ObjectRef *ObjectReference `json:"objectRef,omitempty" protobuf:"bytes,9,opt,name=objectRef"`
+	// The response status, populated even when the ResponseObject is not a Status type.
+	// For successful responses, this will only include the Code and StatusSuccess.
+	// For non-status type error responses, this will be auto-populated with the error Message.
+	// +optional
+	ResponseStatus *metav1.Status `json:"responseStatus,omitempty" protobuf:"bytes,10,opt,name=responseStatus"`
+
+	// API object from the request, in JSON format. The RequestObject is recorded as-is in the request
+	// (possibly re-encoded as JSON), prior to version conversion, defaulting, admission or
+	// merging. It is an external versioned object type, and may not be a valid object on its own.
+	// Omitted for non-resource requests. Only logged at Request Level and higher.
+	// +optional
+	RequestObject *runtime.Unknown `json:"requestObject,omitempty" protobuf:"bytes,11,opt,name=requestObject"`
+	// API object returned in the response, in JSON. The ResponseObject is recorded after conversion
+	// to the external type, and serialized as JSON. Omitted for non-resource requests. Only logged
+	// at Response Level.
+	// +optional
+	ResponseObject *runtime.Unknown `json:"responseObject,omitempty" protobuf:"bytes,12,opt,name=responseObject"`
+	// Time the request reached the apiserver.
+	// +optional
+	RequestReceivedTimestamp metav1.MicroTime `json:"requestReceivedTimestamp" protobuf:"bytes,13,opt,name=requestReceivedTimestamp"`
+	// Time the request reached current audit stage.
+	// +optional
+	StageTimestamp metav1.MicroTime `json:"stageTimestamp" protobuf:"bytes,14,opt,name=stageTimestamp"`
+
+	// Annotations is an unstructured key value map stored with an audit event that may be set by
+	// plugins invoked in the request serving chain, including authentication, authorization and
+	// admission plugins. Note that these annotations are for the audit event, and do not correspond
+	// to the metadata.annotations of the submitted object. Keys should uniquely identify the informing
+	// component to avoid name collisions (e.g. podsecuritypolicy.admission.k8s.io/policy). Values
+	// should be short. Annotations are included in the Metadata level.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,15,rep,name=annotations"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventList is a list of audit Events.
+type EventList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Policy defines the configuration of audit logging, and the rules for how different request
+// categories are logged.
+type Policy struct {
+	metav1.TypeMeta `json:",inline"`
+	// ObjectMeta is included for interoperability with API infrastructure.
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Rules specify the audit Level a request should be recorded at.
+	// A request may match multiple rules, in which case the FIRST matching rule is used.
+	// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
+	// PolicyRules are strictly ordered.
+	// +listType=atomic
+	Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified per rule in which case the union of both is omitted.
+	// +optional
+	// +listType=atomic
+	OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"`
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// This is used as a global default - a value of 'true' will omit the managed fields,
+	// otherwise the managed fields will be included in the API audit log.
+	// Note that this can also be specified per rule in which case the value specified
+	// in a rule will override the global default.
+	// +optional
+	OmitManagedFields bool `json:"omitManagedFields,omitempty" protobuf:"varint,4,opt,name=omitManagedFields"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PolicyList is a list of audit Policies.
+type PolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Items []Policy `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// PolicyRule maps requests based on metadata to an audit Level.
+// Requests must match the rules of every field (an intersection of rules).
+type PolicyRule struct {
+	// The Level that requests matching this rule are recorded at.
+	Level Level `json:"level" protobuf:"bytes,1,opt,name=level,casttype=Level"`
+
+	// The users (by authenticated user name) this rule applies to.
+	// An empty list implies every user.
+	// +optional
+	// +listType=atomic
+	Users []string `json:"users,omitempty" protobuf:"bytes,2,rep,name=users"`
+	// The user groups this rule applies to. A user is considered matching
+	// if it is a member of any of the UserGroups.
+	// An empty list implies every user group.
+	// +optional
+	// +listType=atomic
+	UserGroups []string `json:"userGroups,omitempty" protobuf:"bytes,3,rep,name=userGroups"`
+
+	// The verbs that match this rule.
+	// An empty list implies every verb.
+	// +optional
+	// +listType=atomic
+	Verbs []string `json:"verbs,omitempty" protobuf:"bytes,4,rep,name=verbs"`
+
+	// Rules can apply to API resources (such as "pods" or "secrets"),
+	// non-resource URL paths (such as "/api"), or neither, but not both.
+	// If neither is specified, the rule is treated as a default for all URLs.
+
+	// Resources that this rule matches. An empty list implies all kinds in all API groups.
+	// +optional
+	// +listType=atomic
+	Resources []GroupResources `json:"resources,omitempty" protobuf:"bytes,5,rep,name=resources"`
+	// Namespaces that this rule matches.
+	// The empty string "" matches non-namespaced resources.
+	// An empty list implies every namespace.
+	// +optional
+	// +listType=atomic
+	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,rep,name=namespaces"`
+
+	// NonResourceURLs is a set of URL paths that should be audited.
+	// `*`s are allowed, but only as the full, final step in the path.
+	// Examples:
+	// - `/metrics` - Log requests for apiserver metrics
+	// - `/healthz*` - Log all health checks
+	// +optional
+	// +listType=atomic
+	NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"`
+
+	// OmitStages is a list of stages for which no events are created. Note that this can also
+	// be specified policy wide in which case the union of both is omitted.
+	// An empty list means no restrictions will apply.
+	// +optional
+	// +listType=atomic
+	OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"`
+
+	// OmitManagedFields indicates whether to omit the managed fields of the request
+	// and response bodies from being written to the API audit log.
+	// - a value of 'true' will drop the managed fields from the API audit log
+	// - a value of 'false' indicates that the managed fields should be included
+	//   in the API audit log
+	// Note that the value, if specified in this rule, will override the global default.
+	// If a value is not specified then the global default specified in
+	// Policy.OmitManagedFields will stand.
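+	//
+	// Illustrative example (editorial note, not upstream documentation): a rule
+	// that records pod writes at the RequestResponse level while dropping managed
+	// fields could be expressed in a Policy manifest as:
+	//
+	//	level: RequestResponse
+	//	verbs: ["create", "update", "patch", "delete"]
+	//	resources:
+	//	- group: ""
+	//	  resources: ["pods"]
+	//	omitManagedFields: true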
+	// +optional
+	OmitManagedFields *bool `json:"omitManagedFields,omitempty" protobuf:"varint,9,opt,name=omitManagedFields"`
+}
+
+// GroupResources represents resource kinds in an API group.
+type GroupResources struct {
+	// Group is the name of the API group that contains the resources.
+	// The empty string represents the core API group.
+	// +optional
+	Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"`
+	// Resources is a list of resources this rule applies to.
+	//
+	// For example:
+	// - `pods` matches pods.
+	// - `pods/log` matches the log subresource of pods.
+	// - `*` matches all resources and their subresources.
+	// - `pods/*` matches all subresources of pods.
+	// - `*/scale` matches all scale subresources.
+	//
+	// If a wildcard is present, the validation rule will ensure resources do not
+	// overlap with each other.
+	//
+	// An empty list implies all resources and subresources in this API group apply.
+	// +optional
+	// +listType=atomic
+	Resources []string `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
+	// ResourceNames is a list of resource instance names that the policy matches.
+	// Using this field requires Resources to be specified.
+	// An empty list implies that every instance of the resource is matched.
+	// +optional
+	// +listType=atomic
+	ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,3,rep,name=resourceNames"`
+}
+
+// ObjectReference contains enough information to let you inspect or modify the referred object.
+type ObjectReference struct {
+	// +optional
+	Resource string `json:"resource,omitempty" protobuf:"bytes,1,opt,name=resource"`
+	// +optional
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
+	// +optional
+	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
+	// +optional
+	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
+	// APIGroup is the name of the API group that contains the referred object.
+	// The empty string represents the core API group.
+	// +optional
+	APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,5,opt,name=apiGroup"`
+	// APIVersion is the version of the API group that contains the referred object.
+	// +optional
+	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,6,opt,name=apiVersion"`
+	// +optional
+	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,7,opt,name=resourceVersion"`
+	// +optional
+	Subresource string `json:"subresource,omitempty" protobuf:"bytes,8,opt,name=subresource"`
+}
diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go
new file mode 100644
index 00000000..53cbb020
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.conversion.go
@@ -0,0 +1,327 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + unsafe "unsafe" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + audit "k8s.io/apiserver/pkg/apis/audit" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*Event)(nil), (*audit.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Event_To_audit_Event(a.(*Event), b.(*audit.Event), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.Event)(nil), (*Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Event_To_v1_Event(a.(*audit.Event), b.(*Event), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*EventList)(nil), (*audit.EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_EventList_To_audit_EventList(a.(*EventList), b.(*audit.EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.EventList)(nil), (*EventList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_EventList_To_v1_EventList(a.(*audit.EventList), b.(*EventList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*GroupResources)(nil), (*audit.GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_GroupResources_To_audit_GroupResources(a.(*GroupResources), b.(*audit.GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.GroupResources)(nil), (*GroupResources)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_GroupResources_To_v1_GroupResources(a.(*audit.GroupResources), b.(*GroupResources), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ObjectReference)(nil), (*audit.ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ObjectReference_To_audit_ObjectReference(a.(*ObjectReference), b.(*audit.ObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.ObjectReference)(nil), (*ObjectReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_ObjectReference_To_v1_ObjectReference(a.(*audit.ObjectReference), b.(*ObjectReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Policy)(nil), (*audit.Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Policy_To_audit_Policy(a.(*Policy), b.(*audit.Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.Policy)(nil), (*Policy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_Policy_To_v1_Policy(a.(*audit.Policy), b.(*Policy), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyList)(nil), 
(*audit.PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PolicyList_To_audit_PolicyList(a.(*PolicyList), b.(*audit.PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyList)(nil), (*PolicyList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyList_To_v1_PolicyList(a.(*audit.PolicyList), b.(*PolicyList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PolicyRule)(nil), (*audit.PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PolicyRule_To_audit_PolicyRule(a.(*PolicyRule), b.(*audit.PolicyRule), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*audit.PolicyRule)(nil), (*PolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_audit_PolicyRule_To_v1_PolicyRule(a.(*audit.PolicyRule), b.(*PolicyRule), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + out.Level = audit.Level(in.Level) + out.AuditID = types.UID(in.AuditID) + out.Stage = audit.Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + out.ObjectRef = (*audit.ObjectReference)(unsafe.Pointer(in.ObjectRef)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_v1_Event_To_audit_Event is an autogenerated conversion function. +func Convert_v1_Event_To_audit_Event(in *Event, out *audit.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_audit_Event(in, out, s) +} + +func autoConvert_audit_Event_To_v1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + out.Level = Level(in.Level) + out.AuditID = types.UID(in.AuditID) + out.Stage = Stage(in.Stage) + out.RequestURI = in.RequestURI + out.Verb = in.Verb + out.User = in.User + out.ImpersonatedUser = (*authenticationv1.UserInfo)(unsafe.Pointer(in.ImpersonatedUser)) + out.SourceIPs = *(*[]string)(unsafe.Pointer(&in.SourceIPs)) + out.UserAgent = in.UserAgent + out.ObjectRef = (*ObjectReference)(unsafe.Pointer(in.ObjectRef)) + out.ResponseStatus = (*metav1.Status)(unsafe.Pointer(in.ResponseStatus)) + out.RequestObject = (*runtime.Unknown)(unsafe.Pointer(in.RequestObject)) + out.ResponseObject = (*runtime.Unknown)(unsafe.Pointer(in.ResponseObject)) + out.RequestReceivedTimestamp = in.RequestReceivedTimestamp + out.StageTimestamp = in.StageTimestamp + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + return nil +} + +// Convert_audit_Event_To_v1_Event is an autogenerated conversion function. 
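+// Editorial note, not part of the generated file: the unsafe.Pointer casts in
+// these conversion functions are the standard conversion-gen optimization for
+// fields whose internal and v1 representations share an identical memory
+// layout, so slices and pointers are reinterpreted in place rather than copied.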
+func Convert_audit_Event_To_v1_Event(in *audit.Event, out *Event, s conversion.Scope) error { + return autoConvert_audit_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]audit.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EventList_To_audit_EventList is an autogenerated conversion function. +func Convert_v1_EventList_To_audit_EventList(in *EventList, out *audit.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_audit_EventList(in, out, s) +} + +func autoConvert_audit_EventList_To_v1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_audit_EventList_To_v1_EventList is an autogenerated conversion function. +func Convert_audit_EventList_To_v1_EventList(in *audit.EventList, out *EventList, s conversion.Scope) error { + return autoConvert_audit_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_v1_GroupResources_To_audit_GroupResources is an autogenerated conversion function. +func Convert_v1_GroupResources_To_audit_GroupResources(in *GroupResources, out *audit.GroupResources, s conversion.Scope) error { + return autoConvert_v1_GroupResources_To_audit_GroupResources(in, out, s) +} + +func autoConvert_audit_GroupResources_To_v1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + out.Group = in.Group + out.Resources = *(*[]string)(unsafe.Pointer(&in.Resources)) + out.ResourceNames = *(*[]string)(unsafe.Pointer(&in.ResourceNames)) + return nil +} + +// Convert_audit_GroupResources_To_v1_GroupResources is an autogenerated conversion function. +func Convert_audit_GroupResources_To_v1_GroupResources(in *audit.GroupResources, out *GroupResources, s conversion.Scope) error { + return autoConvert_audit_GroupResources_To_v1_GroupResources(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIGroup = in.APIGroup + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +// Convert_v1_ObjectReference_To_audit_ObjectReference is an autogenerated conversion function. 
+func Convert_v1_ObjectReference_To_audit_ObjectReference(in *ObjectReference, out *audit.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_audit_ObjectReference(in, out, s) +} + +func autoConvert_audit_ObjectReference_To_v1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + out.Resource = in.Resource + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIGroup = in.APIGroup + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.Subresource = in.Subresource + return nil +} + +// Convert_audit_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. +func Convert_audit_ObjectReference_To_v1_ObjectReference(in *audit.ObjectReference, out *ObjectReference, s conversion.Scope) error { + return autoConvert_audit_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields + return nil +} + +// Convert_v1_Policy_To_audit_Policy is an autogenerated conversion function. +func Convert_v1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error { + return autoConvert_v1_Policy_To_audit_Policy(in, out, s) +} + +func autoConvert_audit_Policy_To_v1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = in.OmitManagedFields + return nil +} + +// Convert_audit_Policy_To_v1_Policy is an autogenerated conversion function. +func Convert_audit_Policy_To_v1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error { + return autoConvert_audit_Policy_To_v1_Policy(in, out, s) +} + +func autoConvert_v1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]audit.Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PolicyList_To_audit_PolicyList is an autogenerated conversion function. +func Convert_v1_PolicyList_To_audit_PolicyList(in *PolicyList, out *audit.PolicyList, s conversion.Scope) error { + return autoConvert_v1_PolicyList_To_audit_PolicyList(in, out, s) +} + +func autoConvert_audit_PolicyList_To_v1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]Policy)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_audit_PolicyList_To_v1_PolicyList is an autogenerated conversion function. 
+func Convert_audit_PolicyList_To_v1_PolicyList(in *audit.PolicyList, out *PolicyList, s conversion.Scope) error { + return autoConvert_audit_PolicyList_To_v1_PolicyList(in, out, s) +} + +func autoConvert_v1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + out.Level = audit.Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]audit.GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) + return nil +} + +// Convert_v1_PolicyRule_To_audit_PolicyRule is an autogenerated conversion function. +func Convert_v1_PolicyRule_To_audit_PolicyRule(in *PolicyRule, out *audit.PolicyRule, s conversion.Scope) error { + return autoConvert_v1_PolicyRule_To_audit_PolicyRule(in, out, s) +} + +func autoConvert_audit_PolicyRule_To_v1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + out.Level = Level(in.Level) + out.Users = *(*[]string)(unsafe.Pointer(&in.Users)) + out.UserGroups = *(*[]string)(unsafe.Pointer(&in.UserGroups)) + out.Verbs = *(*[]string)(unsafe.Pointer(&in.Verbs)) + out.Resources = *(*[]GroupResources)(unsafe.Pointer(&in.Resources)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.NonResourceURLs = *(*[]string)(unsafe.Pointer(&in.NonResourceURLs)) + out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages)) + out.OmitManagedFields = (*bool)(unsafe.Pointer(in.OmitManagedFields)) + return nil +} + +// Convert_audit_PolicyRule_To_v1_PolicyRule is an autogenerated conversion function. +func Convert_audit_PolicyRule_To_v1_PolicyRule(in *audit.PolicyRule, out *PolicyRule, s conversion.Scope) error { + return autoConvert_audit_PolicyRule_To_v1_PolicyRule(in, out, s) +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..0b1b0052 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.deepcopy.go @@ -0,0 +1,297 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.User.DeepCopyInto(&out.User) + if in.ImpersonatedUser != nil { + in, out := &in.ImpersonatedUser, &out.ImpersonatedUser + *out = new(authenticationv1.UserInfo) + (*in).DeepCopyInto(*out) + } + if in.SourceIPs != nil { + in, out := &in.SourceIPs, &out.SourceIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(ObjectReference) + **out = **in + } + if in.ResponseStatus != nil { + in, out := &in.ResponseStatus, &out.ResponseStatus + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.RequestObject != nil { + in, out := &in.RequestObject, &out.RequestObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + if in.ResponseObject != nil { + in, out := &in.ResponseObject, &out.ResponseObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResources) DeepCopyInto(out *GroupResources) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources. +func (in *GroupResources) DeepCopy() *GroupResources { + if in == nil { + return nil + } + out := new(GroupResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]GroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + if in.OmitManagedFields != nil { + in, out := &in.OmitManagedFields, &out.OmitManagedFields + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go new file mode 100644 index 00000000..dac177e9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/v1/zz_generated.defaults.go @@ -0,0 +1,33 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go new file mode 100644 index 00000000..81d5add4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/audit/zz_generated.deepcopy.go @@ -0,0 +1,297 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package audit + +import ( + v1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.User.DeepCopyInto(&out.User) + if in.ImpersonatedUser != nil { + in, out := &in.ImpersonatedUser, &out.ImpersonatedUser + *out = new(v1.UserInfo) + (*in).DeepCopyInto(*out) + } + if in.SourceIPs != nil { + in, out := &in.SourceIPs, &out.SourceIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectRef != nil { + in, out := &in.ObjectRef, &out.ObjectRef + *out = new(ObjectReference) + **out = **in + } + if in.ResponseStatus != nil { + in, out := &in.ResponseStatus, &out.ResponseStatus + *out = new(metav1.Status) + (*in).DeepCopyInto(*out) + } + if in.RequestObject != nil { + in, out := &in.RequestObject, &out.RequestObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + if in.ResponseObject != nil { + in, out := &in.ResponseObject, &out.ResponseObject + *out = new(runtime.Unknown) + (*in).DeepCopyInto(*out) + } + in.RequestReceivedTimestamp.DeepCopyInto(&out.RequestReceivedTimestamp) + in.StageTimestamp.DeepCopyInto(&out.StageTimestamp) + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupResources) DeepCopyInto(out *GroupResources) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResources. +func (in *GroupResources) DeepCopy() *GroupResources { + if in == nil { + return nil + } + out := new(GroupResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyList) DeepCopyInto(out *PolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Policy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList. +func (in *PolicyList) DeepCopy() *PolicyList { + if in == nil { + return nil + } + out := new(PolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UserGroups != nil { + in, out := &in.UserGroups, &out.UserGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]GroupResources, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OmitStages != nil { + in, out := &in.OmitStages, &out.OmitStages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + if in.OmitManagedFields != nil { + in, out := &in.OmitManagedFields, &out.OmitManagedFields + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apiserver/pkg/apis/cel/config.go b/vendor/k8s.io/apiserver/pkg/apis/cel/config.go new file mode 100644 index 00000000..319548cd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/apis/cel/config.go @@ -0,0 +1,45 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cel
+
+const (
+	// PerCallLimit specifies the actual cost limit per CEL validation call.
+	// The current PerCallLimit gives roughly 0.1 second for each expression validation call.
+	PerCallLimit = 1000000
+
+	// RuntimeCELCostBudget is the overall cost budget for runtime CEL validation cost per ValidatingAdmissionPolicyBinding or CustomResource.
+	// The current RuntimeCELCostBudget gives roughly 1 second for the validation.
+	RuntimeCELCostBudget = 10000000
+
+	// RuntimeCELCostBudgetMatchConditions is the overall cost budget for runtime CEL validation cost on matchConditions per object with matchConditions.
+	// This is per webhook for validatingwebhookconfigurations and mutatingwebhookconfigurations or per ValidatingAdmissionPolicyBinding.
+	// The current RuntimeCELCostBudgetMatchConditions gives roughly 1/4 second for the validation.
+	RuntimeCELCostBudgetMatchConditions = 2500000
+
+	// CheckFrequency configures the number of iterations within a comprehension to evaluate
+	// before checking whether the function evaluation has been interrupted.
+	CheckFrequency = 100
+
+	// MaxRequestSizeBytes is the maximum size of a request to the API server.
+	// TODO(DangerOnTheRanger): wire in MaxRequestBodyBytes from apiserver/pkg/server/options/server_run_options.go to make this configurable
+	// Note that even if server_run_options.go becomes configurable in the future, this cost constant should be fixed and it should be the max allowed request size for the server.
+	MaxRequestSizeBytes = int64(3 * 1024 * 1024)
+
+	// MaxEvaluatedMessageExpressionSizeBytes represents the largest-allowable string generated
+	// by a messageExpression field.
+	MaxEvaluatedMessageExpressionSizeBytes = 5 * 1024
+)
diff --git a/vendor/k8s.io/apiserver/pkg/audit/OWNERS b/vendor/k8s.io/apiserver/pkg/audit/OWNERS
new file mode 100644
index 00000000..60f38f13
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/audit/OWNERS
@@ -0,0 +1,8 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - sig-auth-audit-approvers
+reviewers:
+  - sig-auth-audit-reviewers
+labels:
+  - sig/auth
diff --git a/vendor/k8s.io/apiserver/pkg/audit/context.go b/vendor/k8s.io/apiserver/pkg/audit/context.go
new file mode 100644
index 00000000..96485873
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/audit/context.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"context"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/types"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
+	"k8s.io/klog/v2"
+)
+
+// The key type is unexported to prevent collisions
+type key int
+
+// auditKey is the context key for storing the audit context that is being
+// captured and the evaluated policy that applies to the given request.
+const auditKey key = iota
+
+// AuditContext holds the information for constructing the audit events for the current request.
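+//
+// Illustrative usage (editorial note, not upstream documentation): request
+// handlers normally go through the package-level helpers defined below rather
+// than the struct itself, for example:
+//
+//	if ac := AuditContextFrom(ctx); ac.Enabled() {
+//		AddAuditAnnotation(ctx, "authorization.k8s.io/decision", "allow")
+//	}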
+type AuditContext struct {
+	// RequestAuditConfig is the audit configuration that applies to the request
+	RequestAuditConfig RequestAuditConfig
+
+	// Event is the audit Event object that is being captured to be written in
+	// the API audit log.
+	Event auditinternal.Event
+
+	// annotationMutex guards event.Annotations
+	annotationMutex sync.Mutex
+}
+
+// Enabled checks whether auditing is enabled for this audit context.
+func (ac *AuditContext) Enabled() bool {
+	// Note: An unset Level should be considered Enabled, so that request data (e.g. annotations)
+	// can still be captured before the audit policy is evaluated.
+	return ac != nil && ac.RequestAuditConfig.Level != auditinternal.LevelNone
+}
+
+// AddAuditAnnotation sets the audit annotation for the given key, value pair.
+// It is safe to call at most parts of the request flow that come after WithAuditAnnotations.
+// The notable exception is that this function must not be called via a
+// defer statement (i.e. after ServeHTTP) in a handler that runs before WithAudit,
+// as at that point the audit event has already been sent to the audit sink.
+// Handlers that are unaware of their position in the overall request flow should
+// prefer AddAuditAnnotation over LogAnnotation to avoid dropping annotations.
+func AddAuditAnnotation(ctx context.Context, key, value string) {
+	ac := AuditContextFrom(ctx)
+	if !ac.Enabled() {
+		return
+	}
+
+	ac.annotationMutex.Lock()
+	defer ac.annotationMutex.Unlock()
+
+	addAuditAnnotationLocked(ac, key, value)
+}
+
+// AddAuditAnnotations is a bulk version of AddAuditAnnotation. Refer to AddAuditAnnotation for
+// restrictions on when this can be called.
+// keysAndValues are the key-value pairs to add, and must have an even number of items.
+func AddAuditAnnotations(ctx context.Context, keysAndValues ...string) {
+	ac := AuditContextFrom(ctx)
+	if !ac.Enabled() {
+		return
+	}
+
+	ac.annotationMutex.Lock()
+	defer ac.annotationMutex.Unlock()
+
+	if len(keysAndValues)%2 != 0 {
+		klog.Errorf("Dropping mismatched audit annotation %q", keysAndValues[len(keysAndValues)-1])
+	}
+	for i := 0; i < len(keysAndValues); i += 2 {
+		addAuditAnnotationLocked(ac, keysAndValues[i], keysAndValues[i+1])
+	}
+}
+
+// AddAuditAnnotationsMap is a bulk version of AddAuditAnnotation. Refer to AddAuditAnnotation for
+// restrictions on when this can be called.
+func AddAuditAnnotationsMap(ctx context.Context, annotations map[string]string) {
+	ac := AuditContextFrom(ctx)
+	if !ac.Enabled() {
+		return
+	}
+
+	ac.annotationMutex.Lock()
+	defer ac.annotationMutex.Unlock()
+
+	for k, v := range annotations {
+		addAuditAnnotationLocked(ac, k, v)
+	}
+}
+
+// addAuditAnnotationLocked records the audit annotation on the event.
+func addAuditAnnotationLocked(ac *AuditContext, key, value string) {
+	ae := &ac.Event
+
+	if ae.Annotations == nil {
+		ae.Annotations = make(map[string]string)
+	}
+	if v, ok := ae.Annotations[key]; ok && v != value {
+		klog.Warningf("Failed to set annotations[%q] to %q for audit:%q, it has already been set to %q", key, value, ae.AuditID, ae.Annotations[key])
+		return
+	}
+	ae.Annotations[key] = value
+}
+
+// WithAuditContext returns a new context that stores the AuditContext.
+func WithAuditContext(parent context.Context) context.Context {
+	if AuditContextFrom(parent) != nil {
+		return parent // Avoid double registering.
+ } + + return genericapirequest.WithValue(parent, auditKey, &AuditContext{}) +} + +// AuditEventFrom returns the audit event struct on the ctx +func AuditEventFrom(ctx context.Context) *auditinternal.Event { + if ac := AuditContextFrom(ctx); ac.Enabled() { + return &ac.Event + } + return nil +} + +// AuditContextFrom returns the pair of the audit configuration object +// that applies to the given request and the audit event that is going to +// be written to the API audit log. +func AuditContextFrom(ctx context.Context) *AuditContext { + ev, _ := ctx.Value(auditKey).(*AuditContext) + return ev +} + +// WithAuditID sets the AuditID on the AuditContext. The AuditContext must already be present in the +// request context. If the specified auditID is empty, no value is set. +func WithAuditID(ctx context.Context, auditID types.UID) { + if auditID == "" { + return + } + if ac := AuditContextFrom(ctx); ac != nil { + ac.Event.AuditID = auditID + } +} + +// AuditIDFrom returns the value of the audit ID from the request context, along with whether +// auditing is enabled. +func AuditIDFrom(ctx context.Context) (types.UID, bool) { + if ac := AuditContextFrom(ctx); ac != nil { + return ac.Event.AuditID, true + } + return "", false +} + +// GetAuditIDTruncated returns the audit ID (truncated) from the request context. +// If the length of the Audit-ID value exceeds the limit, we truncate it to keep +// the first N (maxAuditIDLength) characters. +// This is intended to be used in logging only. +func GetAuditIDTruncated(ctx context.Context) string { + auditID, ok := AuditIDFrom(ctx) + if !ok { + return "" + } + + // if the user has specified a very long audit ID then we will use the first N characters + // Note: assuming Audit-ID header is in ASCII + const maxAuditIDLength = 64 + if len(auditID) > maxAuditIDLength { + auditID = auditID[:maxAuditIDLength] + } + + return string(auditID) +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/evaluator.go b/vendor/k8s.io/apiserver/pkg/audit/evaluator.go new file mode 100644 index 00000000..f9664fef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/evaluator.go @@ -0,0 +1,45 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authorization/authorizer" +) + +// RequestAuditConfig is the evaluated audit configuration that is applicable to +// a given request. PolicyRuleEvaluator evaluates the audit policy against the +// authorizer attributes and returns a RequestAuditConfig that applies to the request. +type RequestAuditConfig struct { + // Level at which the request is being audited at + Level audit.Level + + // OmitStages is the stages that need to be omitted from being audited. + OmitStages []audit.Stage + + // OmitManagedFields indicates whether to omit the managed fields of the request + // and response bodies from being written to the API audit log. 
+	OmitManagedFields bool
+}
+
+// PolicyRuleEvaluator exposes methods for evaluating the policy rules.
+type PolicyRuleEvaluator interface {
+	// EvaluatePolicyRule evaluates the audit policy of the apiserver against
+	// the given authorizer attributes and returns the audit configuration that
+	// is applicable to the given request.
+	EvaluatePolicyRule(authorizer.Attributes) RequestAuditConfig
+}
diff --git a/vendor/k8s.io/apiserver/pkg/audit/format.go b/vendor/k8s.io/apiserver/pkg/audit/format.go
new file mode 100644
index 00000000..9c0784a3
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/audit/format.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package audit
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+)
+
+// EventString creates a 1-line text representation of an audit event, using a subset of the
+// information in the event struct.
+func EventString(ev *auditinternal.Event) string {
+	username := ""
+	groups := ""
+	if len(ev.User.Username) > 0 {
+		username = ev.User.Username
+		if len(ev.User.Groups) > 0 {
+			groups = auditStringSlice(ev.User.Groups)
+		}
+	}
+	asuser := ""
+	asgroups := ""
+	if ev.ImpersonatedUser != nil {
+		asuser = ev.ImpersonatedUser.Username
+		if ev.ImpersonatedUser.Groups != nil {
+			asgroups = auditStringSlice(ev.ImpersonatedUser.Groups)
+		}
+	}
+
+	namespace := ""
+	if ev.ObjectRef != nil && len(ev.ObjectRef.Namespace) != 0 {
+		namespace = ev.ObjectRef.Namespace
+	}
+
+	response := ""
+	if ev.ResponseStatus != nil {
+		response = strconv.Itoa(int(ev.ResponseStatus.Code))
+	}
+
+	ip := ""
+	if len(ev.SourceIPs) > 0 {
+		ip = ev.SourceIPs[0]
+	}
+
+	return fmt.Sprintf("%s AUDIT: id=%q stage=%q ip=%q method=%q user=%q groups=%q as=%q asgroups=%q user-agent=%q namespace=%q uri=%q response=\"%s\"",
+		ev.RequestReceivedTimestamp.Format(time.RFC3339Nano), ev.AuditID, ev.Stage, ip, ev.Verb, username, groups, asuser, asgroups, ev.UserAgent, namespace, ev.RequestURI, response)
+}
+
+func auditStringSlice(inList []string) string {
+	quotedElements := make([]string, len(inList))
+	for i, in := range inList {
+		quotedElements[i] = fmt.Sprintf("%q", in)
+	}
+	return strings.Join(quotedElements, ",")
+}
diff --git a/vendor/k8s.io/apiserver/pkg/audit/metrics.go b/vendor/k8s.io/apiserver/pkg/audit/metrics.go
new file mode 100644
index 00000000..729a16ee
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/audit/metrics.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "context" + "fmt" + + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +const ( + subsystem = "apiserver_audit" +) + +/* + * By default, all the following metrics are defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var ( + eventCounter = metrics.NewCounter( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "event_total", + Help: "Counter of audit events generated and sent to the audit backend.", + StabilityLevel: metrics.ALPHA, + }) + errorCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "error_total", + Help: "Counter of audit events that failed to be audited properly. " + + "Plugin identifies the plugin affected by the error.", + StabilityLevel: metrics.ALPHA, + }, + []string{"plugin"}, + ) + levelCounter = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "level_total", + Help: "Counter of policy levels for audit events (1 per request).", + StabilityLevel: metrics.ALPHA, + }, + []string{"level"}, + ) + + ApiserverAuditDroppedCounter = metrics.NewCounter( + &metrics.CounterOpts{ + Subsystem: subsystem, + Name: "requests_rejected_total", + Help: "Counter of apiserver requests rejected due to an error " + + "in audit logging backend.", + StabilityLevel: metrics.ALPHA, + }, + ) +) + +func init() { + legacyregistry.MustRegister(eventCounter) + legacyregistry.MustRegister(errorCounter) + legacyregistry.MustRegister(levelCounter) + legacyregistry.MustRegister(ApiserverAuditDroppedCounter) +} + +// ObserveEvent updates the relevant prometheus metrics for the generated audit event. +func ObserveEvent(ctx context.Context) { + eventCounter.WithContext(ctx).Inc() +} + +// ObservePolicyLevel updates the relevant prometheus metrics with the audit level for a request. +func ObservePolicyLevel(ctx context.Context, level auditinternal.Level) { + levelCounter.WithContext(ctx).WithLabelValues(string(level)).Inc() +} + +// HandlePluginError handles an error that occurred in an audit plugin. This method should only be +// used if the error may have prevented the audit event from being properly recorded. The events are +// logged to the debug log. +func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Event) { + // Count the error. + errorCounter.WithLabelValues(plugin).Add(float64(len(impacted))) + + // Log the audit events to the debug log. + msg := fmt.Sprintf("Error in audit plugin '%s' affecting %d audit events: %v\nImpacted events:\n", + plugin, len(impacted), err) + for _, ev := range impacted { + msg = msg + EventString(ev) + "\n" + } + klog.Error(msg) +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/request.go b/vendor/k8s.io/apiserver/pkg/audit/request.go new file mode 100644 index 00000000..9185278f --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/request.go @@ -0,0 +1,312 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "bytes" + "context" + "fmt" + "net/http" + "time" + + authnv1 "k8s.io/api/authentication/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilnet "k8s.io/apimachinery/pkg/util/net" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/klog/v2" +) + +const ( + maxUserAgentLength = 1024 + userAgentTruncateSuffix = "...TRUNCATED" +) + +func LogRequestMetadata(ctx context.Context, req *http.Request, requestReceivedTimestamp time.Time, level auditinternal.Level, attribs authorizer.Attributes) { + ac := AuditContextFrom(ctx) + if !ac.Enabled() { + return + } + ev := &ac.Event + + ev.RequestReceivedTimestamp = metav1.NewMicroTime(requestReceivedTimestamp) + ev.Verb = attribs.GetVerb() + ev.RequestURI = req.URL.RequestURI() + ev.UserAgent = maybeTruncateUserAgent(req) + ev.Level = level + + ips := utilnet.SourceIPs(req) + ev.SourceIPs = make([]string, len(ips)) + for i := range ips { + ev.SourceIPs[i] = ips[i].String() + } + + if user := attribs.GetUser(); user != nil { + ev.User.Username = user.GetName() + ev.User.Extra = map[string]authnv1.ExtraValue{} + for k, v := range user.GetExtra() { + ev.User.Extra[k] = authnv1.ExtraValue(v) + } + ev.User.Groups = user.GetGroups() + ev.User.UID = user.GetUID() + } + + if attribs.IsResourceRequest() { + ev.ObjectRef = &auditinternal.ObjectReference{ + Namespace: attribs.GetNamespace(), + Name: attribs.GetName(), + Resource: attribs.GetResource(), + Subresource: attribs.GetSubresource(), + APIGroup: attribs.GetAPIGroup(), + APIVersion: attribs.GetAPIVersion(), + } + } +} + +// LogImpersonatedUser fills in the impersonated user attributes into an audit event. +func LogImpersonatedUser(ae *auditinternal.Event, user user.Info) { + if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + return + } + ae.ImpersonatedUser = &authnv1.UserInfo{ + Username: user.GetName(), + } + ae.ImpersonatedUser.Groups = user.GetGroups() + ae.ImpersonatedUser.UID = user.GetUID() + ae.ImpersonatedUser.Extra = map[string]authnv1.ExtraValue{} + for k, v := range user.GetExtra() { + ae.ImpersonatedUser.Extra[k] = authnv1.ExtraValue(v) + } +} + +// LogRequestObject fills in the request object into an audit event. The passed runtime.Object +// will be converted to the given gv. 
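+//
+// A sketch of a hypothetical call site (the gvr, obj, and codecs values are
+// illustrative, not taken from this package):
+//
+//	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}
+//	audit.LogRequestObject(ctx, obj, gvr.GroupVersion(), gvr, "", codecs)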
+func LogRequestObject(ctx context.Context, obj runtime.Object, objGV schema.GroupVersion, gvr schema.GroupVersionResource, subresource string, s runtime.NegotiatedSerializer) { + ae := AuditEventFrom(ctx) + if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + return + } + + // complete ObjectRef + if ae.ObjectRef == nil { + ae.ObjectRef = &auditinternal.ObjectReference{} + } + + // meta.Accessor is more general than ObjectMetaAccessor, but if it fails, we can just skip setting these bits + if meta, err := meta.Accessor(obj); err == nil { + if len(ae.ObjectRef.Namespace) == 0 { + ae.ObjectRef.Namespace = meta.GetNamespace() + } + if len(ae.ObjectRef.Name) == 0 { + ae.ObjectRef.Name = meta.GetName() + } + if len(ae.ObjectRef.UID) == 0 { + ae.ObjectRef.UID = meta.GetUID() + } + if len(ae.ObjectRef.ResourceVersion) == 0 { + ae.ObjectRef.ResourceVersion = meta.GetResourceVersion() + } + } + if len(ae.ObjectRef.APIVersion) == 0 { + ae.ObjectRef.APIGroup = gvr.Group + ae.ObjectRef.APIVersion = gvr.Version + } + if len(ae.ObjectRef.Resource) == 0 { + ae.ObjectRef.Resource = gvr.Resource + } + if len(ae.ObjectRef.Subresource) == 0 { + ae.ObjectRef.Subresource = subresource + } + + if ae.Level.Less(auditinternal.LevelRequest) { + return + } + + if shouldOmitManagedFields(ctx) { + copy, ok, err := copyWithoutManagedFields(obj) + if err != nil { + klog.ErrorS(err, "Error while dropping managed fields from the request", "auditID", ae.AuditID) + } + if ok { + obj = copy + } + } + + // TODO(audit): hook into the serializer to avoid double conversion + var err error + ae.RequestObject, err = encodeObject(obj, objGV, s) + if err != nil { + // TODO(audit): add error slice to audit event struct + klog.ErrorS(err, "Encoding failed of request object", "auditID", ae.AuditID, "gvr", gvr.String(), "obj", obj) + return + } +} + +// LogRequestPatch fills in the given patch as the request object into an audit event. +func LogRequestPatch(ctx context.Context, patch []byte) { + ae := AuditEventFrom(ctx) + if ae == nil || ae.Level.Less(auditinternal.LevelRequest) { + return + } + + ae.RequestObject = &runtime.Unknown{ + Raw: patch, + ContentType: runtime.ContentTypeJSON, + } +} + +// LogResponseObject fills in the response object into an audit event. The passed runtime.Object +// will be converted to the given gv. +func LogResponseObject(ctx context.Context, obj runtime.Object, gv schema.GroupVersion, s runtime.NegotiatedSerializer) { + ae := AuditEventFrom(ctx) + if ae == nil || ae.Level.Less(auditinternal.LevelMetadata) { + return + } + if status, ok := obj.(*metav1.Status); ok { + // selectively copy the bounded fields. 
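+		// Copying only these bounded fields keeps events at the lower audit
+		// levels small; the full response body is recorded further below, and
+		// only for LevelRequestResponse.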
+ ae.ResponseStatus = &metav1.Status{ + Status: status.Status, + Message: status.Message, + Reason: status.Reason, + Details: status.Details, + Code: status.Code, + } + } + + if ae.Level.Less(auditinternal.LevelRequestResponse) { + return + } + + if shouldOmitManagedFields(ctx) { + copy, ok, err := copyWithoutManagedFields(obj) + if err != nil { + klog.ErrorS(err, "Error while dropping managed fields from the response", "auditID", ae.AuditID) + } + if ok { + obj = copy + } + } + + // TODO(audit): hook into the serializer to avoid double conversion + var err error + ae.ResponseObject, err = encodeObject(obj, gv, s) + if err != nil { + klog.ErrorS(err, "Encoding failed of response object", "auditID", ae.AuditID, "obj", obj) + } +} + +func encodeObject(obj runtime.Object, gv schema.GroupVersion, serializer runtime.NegotiatedSerializer) (*runtime.Unknown, error) { + const mediaType = runtime.ContentTypeJSON + info, ok := runtime.SerializerInfoForMediaType(serializer.SupportedMediaTypes(), mediaType) + if !ok { + return nil, fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType) + } + + enc := serializer.EncoderForVersion(info.Serializer, gv) + var buf bytes.Buffer + if err := enc.Encode(obj, &buf); err != nil { + return nil, fmt.Errorf("encoding failed: %v", err) + } + + return &runtime.Unknown{ + Raw: buf.Bytes(), + ContentType: mediaType, + }, nil +} + +// truncate User-Agent if too long, otherwise return it directly. +func maybeTruncateUserAgent(req *http.Request) string { + ua := req.UserAgent() + if len(ua) > maxUserAgentLength { + ua = ua[:maxUserAgentLength] + userAgentTruncateSuffix + } + + return ua +} + +// copyWithoutManagedFields will make a deep copy of the specified object and +// will discard the managed fields from the copy. +// The specified object is expected to be a meta.Object or a "list". +// The specified object obj is treated as readonly and hence not mutated. +// On return, an error is set if the function runs into any error while +// removing the managed fields, the boolean value is true if the copy has +// been made successfully, otherwise false. +func copyWithoutManagedFields(obj runtime.Object) (runtime.Object, bool, error) { + isAccessor := true + if _, err := meta.Accessor(obj); err != nil { + isAccessor = false + } + isList := meta.IsListType(obj) + _, isTable := obj.(*metav1.Table) + if !isAccessor && !isList && !isTable { + return nil, false, nil + } + + // TODO a deep copy isn't really needed here, figure out how we can reliably + // use shallow copy here to omit the manageFields. 
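+	// The deep copy preserves the read-only contract on obj documented above;
+	// managed fields are stripped from the copy only, never from the input.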
+ copy := obj.DeepCopyObject() + + if isAccessor { + if err := removeManagedFields(copy); err != nil { + return nil, false, err + } + } + + if isList { + if err := meta.EachListItem(copy, removeManagedFields); err != nil { + return nil, false, err + } + } + + if isTable { + table := copy.(*metav1.Table) + for i := range table.Rows { + rowObj := table.Rows[i].Object + if err := removeManagedFields(rowObj.Object); err != nil { + return nil, false, err + } + } + } + + return copy, true, nil +} + +func removeManagedFields(obj runtime.Object) error { + if obj == nil { + return nil + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + accessor.SetManagedFields(nil) + return nil +} + +func shouldOmitManagedFields(ctx context.Context) bool { + if auditContext := AuditContextFrom(ctx); auditContext != nil { + return auditContext.RequestAuditConfig.OmitManagedFields + } + + // If we can't decide, return false to maintain current behavior which is + // to retain the manage fields in the audit. + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/scheme.go b/vendor/k8s.io/apiserver/pkg/audit/scheme.go new file mode 100644 index 00000000..1ebbc64b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/scheme.go @@ -0,0 +1,38 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: Delete this file if we generate a clientset. +package audit + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + auditinternal "k8s.io/apiserver/pkg/apis/audit" + "k8s.io/apiserver/pkg/apis/audit/v1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) + +func init() { + metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(v1.AddToScheme(Scheme)) + utilruntime.Must(auditinternal.AddToScheme(Scheme)) + utilruntime.Must(Scheme.SetVersionPriority(v1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/types.go b/vendor/k8s.io/apiserver/pkg/audit/types.go new file mode 100644 index 00000000..b78bd086 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/types.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + auditinternal "k8s.io/apiserver/pkg/apis/audit" +) + +type Sink interface { + // ProcessEvents handles events. 
Per audit ID it might be that ProcessEvents is called up to three times. + // Errors might be logged by the sink itself. If an error should be fatal, leading to an internal + // error, ProcessEvents is supposed to panic. The event must not be mutated and is reused by the caller + // after the call returns, i.e. the sink has to make a deepcopy to keep a copy around if necessary. + // Returns true on success, may return false on error. + ProcessEvents(events ...*auditinternal.Event) bool +} + +type Backend interface { + Sink + + // Run will initialize the backend. It must not block, but may run go routines in the background. If + // stopCh is closed, it is supposed to stop them. Run will be called before the first call to ProcessEvents. + Run(stopCh <-chan struct{}) error + + // Shutdown will synchronously shut down the backend while making sure that all pending + // events are delivered. It can be assumed that this method is called after + // the stopCh channel passed to the Run method has been closed. + Shutdown() + + // Returns the backend PluginName. + String() string +} diff --git a/vendor/k8s.io/apiserver/pkg/audit/union.go b/vendor/k8s.io/apiserver/pkg/audit/union.go new file mode 100644 index 00000000..0766a920 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/audit/union.go @@ -0,0 +1,71 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package audit + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/errors" + auditinternal "k8s.io/apiserver/pkg/apis/audit" +) + +// Union returns an audit Backend which logs events to a set of backends. The returned +// Sink implementation blocks in turn for each call to ProcessEvents. +func Union(backends ...Backend) Backend { + if len(backends) == 1 { + return backends[0] + } + return union{backends} +} + +type union struct { + backends []Backend +} + +func (u union) ProcessEvents(events ...*auditinternal.Event) bool { + success := true + for _, backend := range u.backends { + success = backend.ProcessEvents(events...) && success + } + return success +} + +func (u union) Run(stopCh <-chan struct{}) error { + var funcs []func() error + for _, backend := range u.backends { + backend := backend + funcs = append(funcs, func() error { + return backend.Run(stopCh) + }) + } + return errors.AggregateGoroutines(funcs...) +} + +func (u union) Shutdown() { + for _, backend := range u.backends { + backend.Shutdown() + } +} + +func (u union) String() string { + var backendStrings []string + for _, backend := range u.backends { + backendStrings = append(backendStrings, fmt.Sprintf("%s", backend)) + } + return fmt.Sprintf("union[%s]", strings.Join(backendStrings, ",")) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go new file mode 100644 index 00000000..bcf7eb4b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audagnostic.go @@ -0,0 +1,90 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticator + +import ( + "context" + "fmt" + "net/http" +) + +func authenticate(ctx context.Context, implicitAuds Audiences, authenticate func() (*Response, bool, error)) (*Response, bool, error) { + targetAuds, ok := AudiencesFrom(ctx) + // We can remove this once api audiences is never empty. That will probably + // be N releases after TokenRequest is GA. + if !ok { + return authenticate() + } + auds := implicitAuds.Intersect(targetAuds) + if len(auds) == 0 { + return nil, false, nil + } + resp, ok, err := authenticate() + if err != nil || !ok { + return nil, false, err + } + if len(resp.Audiences) > 0 { + // maybe the authenticator was audience aware after all. + return nil, false, fmt.Errorf("audience agnostic authenticator wrapped an authenticator that returned audiences: %q", resp.Audiences) + } + resp.Audiences = auds + return resp, true, nil +} + +type audAgnosticRequestAuthenticator struct { + implicit Audiences + delegate Request +} + +var _ = Request(&audAgnosticRequestAuthenticator{}) + +func (a *audAgnosticRequestAuthenticator) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return authenticate(req.Context(), a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateRequest(req) + }) +} + +// WrapAudienceAgnosticRequest wraps an audience agnostic request authenticator +// to restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticRequest(implicit Audiences, delegate Request) Request { + return &audAgnosticRequestAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} + +type audAgnosticTokenAuthenticator struct { + implicit Audiences + delegate Token +} + +var _ = Token(&audAgnosticTokenAuthenticator{}) + +func (a *audAgnosticTokenAuthenticator) AuthenticateToken(ctx context.Context, tok string) (*Response, bool, error) { + return authenticate(ctx, a.implicit, func() (*Response, bool, error) { + return a.delegate.AuthenticateToken(ctx, tok) + }) +} + +// WrapAudienceAgnosticToken wraps an audience agnostic token authenticator to +// restrict its accepted audiences to a set of implicit audiences. +func WrapAudienceAgnosticToken(implicit Audiences, delegate Token) Token { + return &audAgnosticTokenAuthenticator{ + implicit: implicit, + delegate: delegate, + } +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go new file mode 100644 index 00000000..2a3a9188 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/audiences.go @@ -0,0 +1,63 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authenticator
+
+import "context"
+
+// Audiences is a container for the Audiences of a token.
+type Audiences []string
+
+// The key type is unexported to prevent collisions
+type key int
+
+const (
+	// audiencesKey is the context key for request audiences.
+	audiencesKey key = iota
+)
+
+// WithAudiences returns a context that stores a request's expected audiences.
+func WithAudiences(ctx context.Context, auds Audiences) context.Context {
+	return context.WithValue(ctx, audiencesKey, auds)
+}
+
+// AudiencesFrom returns a request's expected audiences stored in the request context.
+func AudiencesFrom(ctx context.Context) (Audiences, bool) {
+	auds, ok := ctx.Value(audiencesKey).(Audiences)
+	return auds, ok
+}
+
+// Has checks if Audiences contains a specific audience.
+func (a Audiences) Has(taud string) bool {
+	for _, aud := range a {
+		if aud == taud {
+			return true
+		}
+	}
+	return false
+}
+
+// Intersect intersects Audiences with a target Audiences and returns all
+// elements in both.
+func (a Audiences) Intersect(tauds Audiences) Audiences {
+	selected := Audiences{}
+	for _, taud := range tauds {
+		if a.Has(taud) {
+			selected = append(selected, taud)
+		}
+	}
+	return selected
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go
new file mode 100644
index 00000000..8ff979b8
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authenticator
+
+import (
+	"context"
+	"net/http"
+
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// Token checks a string value against a backing authentication store and
+// returns a Response or an error if the token could not be checked.
+type Token interface {
+	AuthenticateToken(ctx context.Context, token string) (*Response, bool, error)
+}
+
+// Request attempts to extract authentication information from a request and
+// returns a Response or an error if the request could not be checked.
+type Request interface {
+	AuthenticateRequest(req *http.Request) (*Response, bool, error)
+}
+
+// TokenFunc is a function that implements the Token interface.
+type TokenFunc func(ctx context.Context, token string) (*Response, bool, error)
+
+// AuthenticateToken implements authenticator.Token.
+func (f TokenFunc) AuthenticateToken(ctx context.Context, token string) (*Response, bool, error) {
+	return f(ctx, token)
+}
+
+// RequestFunc is a function that implements the Request interface.
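+//
+// For illustration only (denyAll is a hypothetical stub, not part of this
+// package's API surface), an ordinary closure can be adapted into a Request
+// authenticator:
+//
+//	var denyAll Request = RequestFunc(func(req *http.Request) (*Response, bool, error) {
+//		return nil, false, nil
+//	})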
+type RequestFunc func(req *http.Request) (*Response, bool, error) + +// AuthenticateRequest implements authenticator.Request. +func (f RequestFunc) AuthenticateRequest(req *http.Request) (*Response, bool, error) { + return f(req) +} + +// Response is the struct returned by authenticator interfaces upon successful +// authentication. It contains information about whether the authenticator +// authenticated the request, information about the context of the +// authentication, and information about the authenticated user. +type Response struct { + // Audiences is the set of audiences the authenticator was able to validate + // the token against. If the authenticator is not audience aware, this field + // will be empty. + Audiences Audiences + // User is the UserInfo associated with the authentication context. + User user.Info +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go new file mode 100644 index 00000000..76ef4473 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go @@ -0,0 +1,127 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticatorfactory + +import ( + "errors" + "time" + + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/group" + "k8s.io/apiserver/pkg/authentication/request/anonymous" + "k8s.io/apiserver/pkg/authentication/request/bearertoken" + "k8s.io/apiserver/pkg/authentication/request/headerrequest" + unionauth "k8s.io/apiserver/pkg/authentication/request/union" + "k8s.io/apiserver/pkg/authentication/request/websocket" + "k8s.io/apiserver/pkg/authentication/request/x509" + "k8s.io/apiserver/pkg/authentication/token/cache" + "k8s.io/apiserver/pkg/server/dynamiccertificates" + webhooktoken "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook" + authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1" + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// DelegatingAuthenticatorConfig is the minimal configuration needed to create an authenticator +// built to delegate authentication to a kube API server +type DelegatingAuthenticatorConfig struct { + Anonymous *apiserver.AnonymousAuthConfig + + // TokenAccessReviewClient is a client to do token review. It can be nil. Then every token is ignored. + TokenAccessReviewClient authenticationclient.AuthenticationV1Interface + + // TokenAccessReviewTimeout specifies a time limit for requests made by the authorization webhook client. + TokenAccessReviewTimeout time.Duration + + // WebhookRetryBackoff specifies the backoff parameters for the authentication webhook retry logic. + // This allows us to configure the sleep time at each iteration and the maximum number of retries allowed + // before we fail the webhook call in order to limit the fan out that ensues when the system is degraded. 
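+	// A plausible shape for these parameters (illustrative values, not
+	// defaults defined in this package):
+	//
+	//	&wait.Backoff{Duration: 500 * time.Millisecond, Factor: 1.5, Jitter: 0.2, Steps: 5}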
+ WebhookRetryBackoff *wait.Backoff + + // CacheTTL is the length of time that a token authentication answer will be cached. + CacheTTL time.Duration + + // CAContentProvider are the options for verifying incoming connections using mTLS and directly assigning to users. + // Generally this is the CA bundle file used to authenticate client certificates + // If this is nil, then mTLS will not be used. + ClientCertificateCAContentProvider dynamiccertificates.CAContentProvider + + APIAudiences authenticator.Audiences + + RequestHeaderConfig *RequestHeaderConfig +} + +func (c DelegatingAuthenticatorConfig) New() (authenticator.Request, *spec.SecurityDefinitions, error) { + authenticators := []authenticator.Request{} + securityDefinitions := spec.SecurityDefinitions{} + + // front-proxy first, then remote + // Add the front proxy authenticator if requested + if c.RequestHeaderConfig != nil { + requestHeaderAuthenticator := headerrequest.NewDynamicVerifyOptionsSecure( + c.RequestHeaderConfig.CAContentProvider.VerifyOptions, + c.RequestHeaderConfig.AllowedClientNames, + c.RequestHeaderConfig.UsernameHeaders, + c.RequestHeaderConfig.GroupHeaders, + c.RequestHeaderConfig.ExtraHeaderPrefixes, + ) + authenticators = append(authenticators, requestHeaderAuthenticator) + } + + // x509 client cert auth + if c.ClientCertificateCAContentProvider != nil { + authenticators = append(authenticators, x509.NewDynamic(c.ClientCertificateCAContentProvider.VerifyOptions, x509.CommonNameUserConversion)) + } + + if c.TokenAccessReviewClient != nil { + if c.WebhookRetryBackoff == nil { + return nil, nil, errors.New("retry backoff parameters for delegating authentication webhook has not been specified") + } + tokenAuth, err := webhooktoken.NewFromInterface(c.TokenAccessReviewClient, c.APIAudiences, *c.WebhookRetryBackoff, c.TokenAccessReviewTimeout, webhooktoken.AuthenticatorMetrics{ + RecordRequestTotal: RecordRequestTotal, + RecordRequestLatency: RecordRequestLatency, + }) + if err != nil { + return nil, nil, err + } + cachingTokenAuth := cache.New(tokenAuth, false, c.CacheTTL, c.CacheTTL) + authenticators = append(authenticators, bearertoken.New(cachingTokenAuth), websocket.NewProtocolAuthenticator(cachingTokenAuth)) + + securityDefinitions["BearerToken"] = &spec.SecurityScheme{ + SecuritySchemeProps: spec.SecuritySchemeProps{ + Type: "apiKey", + Name: "authorization", + In: "header", + Description: "Bearer Token authentication", + }, + } + } + + if len(authenticators) == 0 { + if c.Anonymous != nil && c.Anonymous.Enabled { + return anonymous.NewAuthenticator(c.Anonymous.Conditions), &securityDefinitions, nil + } + return nil, nil, errors.New("no authentication method configured") + } + + authenticator := group.NewAuthenticatedGroupAdder(unionauth.New(authenticators...)) + if c.Anonymous != nil && c.Anonymous.Enabled { + authenticator = unionauth.NewFailOnError(authenticator, anonymous.NewAuthenticator(c.Anonymous.Conditions)) + } + return authenticator, &securityDefinitions, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go new file mode 100644 index 00000000..fe51afcb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticatorfactory + +import ( + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/request/bearertoken" + "k8s.io/apiserver/pkg/authentication/token/tokenfile" + "k8s.io/apiserver/pkg/authentication/user" +) + +// NewFromTokens returns an authenticator.Request or an error +func NewFromTokens(tokens map[string]*user.DefaultInfo, audiences authenticator.Audiences) authenticator.Request { + return bearertoken.New(authenticator.WrapAudienceAgnosticToken(audiences, tokenfile.New(tokens))) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/metrics.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/metrics.go new file mode 100644 index 00000000..23e650ef --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/metrics.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package authenticatorfactory + +import ( + "context" + + compbasemetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +type registerables []compbasemetrics.Registerable + +// init registers all metrics. +func init() { + for _, metric := range metrics { + legacyregistry.MustRegister(metric) + } +} + +var ( + requestTotal = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Name: "apiserver_delegated_authn_request_total", + Help: "Number of HTTP requests partitioned by status code.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"code"}, + ) + + requestLatency = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Name: "apiserver_delegated_authn_request_duration_seconds", + Help: "Request latency in seconds. Broken down by status code.", + Buckets: []float64{0.25, 0.5, 0.7, 1, 1.5, 3, 5, 10}, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"code"}, + ) + + metrics = registerables{ + requestTotal, + requestLatency, + } +) + +// RecordRequestTotal increments the total number of requests for the delegated authentication. +func RecordRequestTotal(ctx context.Context, code string) { + requestTotal.WithContext(ctx).WithLabelValues(code).Inc() +} + +// RecordRequestLatency measures request latency in seconds for the delegated authentication. Broken down by status code. 
+func RecordRequestLatency(ctx context.Context, code string, latency float64) {
+	requestLatency.WithContext(ctx).WithLabelValues(code).Observe(latency)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go
new file mode 100644
index 00000000..766bde51
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authenticatorfactory
+
+import (
+	"k8s.io/apiserver/pkg/authentication/request/headerrequest"
+	"k8s.io/apiserver/pkg/server/dynamiccertificates"
+)
+
+type RequestHeaderConfig struct {
+	// UsernameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins.
+	UsernameHeaders headerrequest.StringSliceProvider
+	// GroupHeaders are the headers to check (case-insensitively) for group names. All values will be used.
+	GroupHeaders headerrequest.StringSliceProvider
+	// ExtraHeaderPrefixes are the header prefixes to check (case-insensitively) for filling in
+	// the user.Info.Extra. All values of all matching headers will be added.
+	ExtraHeaderPrefixes headerrequest.StringSliceProvider
+	// CAContentProvider provides the options for verifying incoming connections using mTLS. Generally this points to a CA bundle file which is used to verify the identity of the front proxy.
+	// It may produce different options over time.
+	CAContentProvider dynamiccertificates.CAContentProvider
+	// AllowedClientNames is a list of common names that may be presented by the authenticating front proxy. Empty means: accept any.
+	AllowedClientNames headerrequest.StringSliceProvider
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go
new file mode 100644
index 00000000..5550955a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/cel/compile.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+
+	"k8s.io/apimachinery/pkg/util/version"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/apiserver/pkg/cel/environment"
+)
+
+const (
+	claimsVarName = "claims"
+	userVarName   = "user"
+)
+
+// compiler implements the Compiler interface.
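+//
+// A minimal sketch of the intended use (baseEnv is an assumed, pre-built
+// *environment.EnvSet; its construction is omitted here):
+//
+//	c := NewCompiler(baseEnv)
+//	result, err := c.CompileClaimsExpression(&ClaimValidationCondition{
+//		Expression: "claims.exp > 0",
+//	})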
+type compiler struct { + // varEnvs is a map of CEL environments, keyed by the name of the CEL variable. + // The CEL variable is available to the expression. + // We have 2 environments, one for claims and one for user. + varEnvs map[string]*environment.EnvSet +} + +// NewCompiler returns a new Compiler. +func NewCompiler(env *environment.EnvSet) Compiler { + return &compiler{ + varEnvs: mustBuildEnvs(env), + } +} + +// CompileClaimsExpression compiles the given expressionAccessor into a CEL program that can be evaluated. +// The claims CEL variable is available to the expression. +func (c compiler) CompileClaimsExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { + return c.compile(expressionAccessor, claimsVarName) +} + +// CompileUserExpression compiles the given expressionAccessor into a CEL program that can be evaluated. +// The user CEL variable is available to the expression. +func (c compiler) CompileUserExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { + return c.compile(expressionAccessor, userVarName) +} + +func (c compiler) compile(expressionAccessor ExpressionAccessor, envVarName string) (CompilationResult, error) { + resultError := func(errorString string, errType apiservercel.ErrorType) (CompilationResult, error) { + return CompilationResult{}, &apiservercel.Error{ + Type: errType, + Detail: errorString, + } + } + + env, err := c.varEnvs[envVarName].Env(environment.StoredExpressions) + if err != nil { + return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) + } + + ast, issues := env.Compile(expressionAccessor.GetExpression()) + if issues != nil { + return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) + } + + found := false + returnTypes := expressionAccessor.ReturnTypes() + for _, returnType := range returnTypes { + if ast.OutputType() == returnType || cel.AnyType == returnType { + found = true + break + } + } + if !found { + var reason string + if len(returnTypes) == 1 { + reason = fmt.Sprintf("must evaluate to %v", returnTypes[0].String()) + } else { + reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) + } + + return resultError(reason, apiservercel.ErrorTypeInvalid) + } + + if _, err = cel.AstToCheckedExpr(ast); err != nil { + // should be impossible since env.Compile returned no issues + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) + } + prog, err := env.Program(ast) + if err != nil { + return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) + } + + return CompilationResult{ + Program: prog, + AST: ast, + ExpressionAccessor: expressionAccessor, + }, nil +} + +func buildUserType() *apiservercel.DeclType { + field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { + return apiservercel.NewDeclField(name, declType, required, nil, nil) + } + fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { + result := make(map[string]*apiservercel.DeclField, len(fields)) + for _, f := range fields { + result[f.Name] = f + } + return result + } + + return apiservercel.NewObjectType("kubernetes.UserInfo", fields( + field("username", apiservercel.StringType, false), + field("uid", apiservercel.StringType, false), + field("groups", apiservercel.NewListType(apiservercel.StringType, -1), false), + field("extra", 
apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewListType(apiservercel.StringType, -1), -1), false), + )) +} + +func mustBuildEnvs(baseEnv *environment.EnvSet) map[string]*environment.EnvSet { + buildEnvSet := func(envOpts []cel.EnvOption, declTypes []*apiservercel.DeclType) *environment.EnvSet { + env, err := baseEnv.Extend(environment.VersionedOptions{ + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: envOpts, + DeclTypes: declTypes, + }) + if err != nil { + panic(fmt.Sprintf("environment misconfigured: %v", err)) + } + return env + } + + userType := buildUserType() + claimsType := apiservercel.NewMapType(apiservercel.StringType, apiservercel.AnyType, -1) + + envs := make(map[string]*environment.EnvSet, 2) // build two environments, one for claims and one for user + envs[claimsVarName] = buildEnvSet([]cel.EnvOption{cel.Variable(claimsVarName, claimsType.CelType())}, []*apiservercel.DeclType{claimsType}) + envs[userVarName] = buildEnvSet([]cel.EnvOption{cel.Variable(userVarName, userType.CelType())}, []*apiservercel.DeclType{userType}) + + return envs +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go new file mode 100644 index 00000000..0fc20841 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/cel/interface.go @@ -0,0 +1,148 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cel contains the CEL related interfaces and structs for authentication. +package cel + +import ( + "context" + + celgo "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types/ref" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// ExpressionAccessor is an interface that provides access to a CEL expression. +type ExpressionAccessor interface { + GetExpression() string + ReturnTypes() []*celgo.Type +} + +// CompilationResult represents a compiled validations expression. +type CompilationResult struct { + Program celgo.Program + AST *celgo.Ast + ExpressionAccessor ExpressionAccessor +} + +// EvaluationResult contains the minimal required fields and metadata of a cel evaluation +type EvaluationResult struct { + EvalResult ref.Val + ExpressionAccessor ExpressionAccessor +} + +// Compiler provides a CEL expression compiler configured with the desired authentication related CEL variables. +type Compiler interface { + CompileClaimsExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) + CompileUserExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) +} + +// ClaimsMapper provides a CEL expression mapper configured with the claims CEL variable. +type ClaimsMapper interface { + // EvalClaimMapping evaluates the given claim mapping expression and returns a EvaluationResult. + // This is used for username, groups and uid claim mapping that contains a single expression. 
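+	//
+	// For example (an illustrative claims payload; not validated here):
+	//
+	//	claims := &unstructured.Unstructured{Object: map[string]interface{}{"sub": "1234"}}
+	//	result, err := m.EvalClaimMapping(ctx, claims)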
+ EvalClaimMapping(ctx context.Context, claims *unstructured.Unstructured) (EvaluationResult, error) + // EvalClaimMappings evaluates the given expressions and returns a list of EvaluationResult. + // This is used for extra claim mapping and claim validation that contains a list of expressions. + EvalClaimMappings(ctx context.Context, claims *unstructured.Unstructured) ([]EvaluationResult, error) +} + +// UserMapper provides a CEL expression mapper configured with the user CEL variable. +type UserMapper interface { + // EvalUser evaluates the given user expressions and returns a list of EvaluationResult. + // This is used for user validation that contains a list of expressions. + EvalUser(ctx context.Context, userInfo *unstructured.Unstructured) ([]EvaluationResult, error) +} + +var _ ExpressionAccessor = &ClaimMappingExpression{} + +// ClaimMappingExpression is a CEL expression that maps a claim. +type ClaimMappingExpression struct { + Expression string +} + +// GetExpression returns the CEL expression. +func (v *ClaimMappingExpression) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. +func (v *ClaimMappingExpression) ReturnTypes() []*celgo.Type { + // return types is only used for validation. The claims variable that's available + // to the claim mapping expressions is a map[string]interface{}, so we can't + // really know what the return type is during compilation. Strict type checking + // is done during evaluation. + return []*celgo.Type{celgo.AnyType} +} + +var _ ExpressionAccessor = &ClaimValidationCondition{} + +// ClaimValidationCondition is a CEL expression that validates a claim. +type ClaimValidationCondition struct { + Expression string + Message string +} + +// GetExpression returns the CEL expression. +func (v *ClaimValidationCondition) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. +func (v *ClaimValidationCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.BoolType} +} + +var _ ExpressionAccessor = &ExtraMappingExpression{} + +// ExtraMappingExpression is a CEL expression that maps an extra to a list of values. +type ExtraMappingExpression struct { + Key string + Expression string +} + +// GetExpression returns the CEL expression. +func (v *ExtraMappingExpression) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. +func (v *ExtraMappingExpression) ReturnTypes() []*celgo.Type { + // return types is only used for validation. The claims variable that's available + // to the claim mapping expressions is a map[string]interface{}, so we can't + // really know what the return type is during compilation. Strict type checking + // is done during evaluation. + return []*celgo.Type{celgo.AnyType} +} + +var _ ExpressionAccessor = &UserValidationCondition{} + +// UserValidationCondition is a CEL expression that validates a User. +type UserValidationCondition struct { + Expression string + Message string +} + +// GetExpression returns the CEL expression. +func (v *UserValidationCondition) GetExpression() string { + return v.Expression +} + +// ReturnTypes returns the CEL expression return types. 
+func (v *UserValidationCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.BoolType} +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go b/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go new file mode 100644 index 00000000..ab308bb7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/cel/mapper.go @@ -0,0 +1,97 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +var _ ClaimsMapper = &mapper{} +var _ UserMapper = &mapper{} + +// mapper implements the ClaimsMapper and UserMapper interface. +type mapper struct { + compilationResults []CompilationResult +} + +// CELMapper is a struct that holds the compiled expressions for +// username, groups, uid, extra, claimValidation and userValidation +type CELMapper struct { + Username ClaimsMapper + Groups ClaimsMapper + UID ClaimsMapper + Extra ClaimsMapper + ClaimValidationRules ClaimsMapper + UserValidationRules UserMapper +} + +// NewClaimsMapper returns a new ClaimsMapper. +func NewClaimsMapper(compilationResults []CompilationResult) ClaimsMapper { + return &mapper{ + compilationResults: compilationResults, + } +} + +// NewUserMapper returns a new UserMapper. +func NewUserMapper(compilationResults []CompilationResult) UserMapper { + return &mapper{ + compilationResults: compilationResults, + } +} + +// EvalClaimMapping evaluates the given claim mapping expression and returns a EvaluationResult. +func (m *mapper) EvalClaimMapping(ctx context.Context, claims *unstructured.Unstructured) (EvaluationResult, error) { + results, err := m.eval(ctx, map[string]interface{}{claimsVarName: claims.Object}) + if err != nil { + return EvaluationResult{}, err + } + if len(results) != 1 { + return EvaluationResult{}, fmt.Errorf("expected 1 evaluation result, got %d", len(results)) + } + return results[0], nil +} + +// EvalClaimMappings evaluates the given expressions and returns a list of EvaluationResult. +func (m *mapper) EvalClaimMappings(ctx context.Context, claims *unstructured.Unstructured) ([]EvaluationResult, error) { + return m.eval(ctx, map[string]interface{}{claimsVarName: claims.Object}) +} + +// EvalUser evaluates the given user expressions and returns a list of EvaluationResult. 
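+//
+// The userInfo argument presumably mirrors the kubernetes.UserInfo shape used
+// at compilation time; an illustrative input (hypothetical values):
+//
+//	u := &unstructured.Unstructured{Object: map[string]interface{}{
+//		"username": "jane",
+//		"groups":   []interface{}{"system:authenticated"},
+//	}}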
+func (m *mapper) EvalUser(ctx context.Context, userInfo *unstructured.Unstructured) ([]EvaluationResult, error) {
+	return m.eval(ctx, map[string]interface{}{userVarName: userInfo.Object})
+}
+
+func (m *mapper) eval(ctx context.Context, input map[string]interface{}) ([]EvaluationResult, error) {
+	evaluations := make([]EvaluationResult, len(m.compilationResults))
+
+	for i, compilationResult := range m.compilationResults {
+		var evaluation = &evaluations[i]
+		evaluation.ExpressionAccessor = compilationResult.ExpressionAccessor
+
+		evalResult, _, err := compilationResult.Program.ContextEval(ctx, input)
+		if err != nil {
+			return nil, fmt.Errorf("expression '%s' resulted in error: %w", compilationResult.ExpressionAccessor.GetExpression(), err)
+		}
+
+		evaluation.EvalResult = evalResult
+	}
+
+	return evaluations, nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go
new file mode 100644
index 00000000..2f2cc6ec
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/group/authenticated_group_adder.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package group
+
+import (
+	"net/http"
+
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// AuthenticatedGroupAdder adds the system:authenticated group when appropriate
+type AuthenticatedGroupAdder struct {
+	// Authenticator is delegated to make the authentication decision
+	Authenticator authenticator.Request
+}
+
+// NewAuthenticatedGroupAdder wraps a request authenticator, and adds the system:authenticated group when appropriate.
+// Authentication must succeed, the user must not be system:anonymous, and neither the
+// system:authenticated nor the system:unauthenticated group may already be present.
+func NewAuthenticatedGroupAdder(auth authenticator.Request) authenticator.Request {
+	return &AuthenticatedGroupAdder{auth}
+}
+
+func (g *AuthenticatedGroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) {
+	r, ok, err := g.Authenticator.AuthenticateRequest(req)
+	if err != nil || !ok {
+		return nil, ok, err
+	}
+
+	if r.User.GetName() == user.Anonymous {
+		return r, true, nil
+	}
+	for _, group := range r.User.GetGroups() {
+		if group == user.AllAuthenticated || group == user.AllUnauthenticated {
+			return r, true, nil
+		}
+	}
+
+	newGroups := make([]string, 0, len(r.User.GetGroups())+1)
+	newGroups = append(newGroups, r.User.GetGroups()...)
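+	// Note: the group slice is rebuilt rather than appended to in place so the delegate
+	// authenticator's user.Info is never mutated; the Response itself is shallow-copied
+	// below for the same reason.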
+ newGroups = append(newGroups, user.AllAuthenticated) + + ret := *r // shallow copy + ret.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: newGroups, + Extra: r.User.GetExtra(), + } + return &ret, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go new file mode 100644 index 00000000..1234c595 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go @@ -0,0 +1,57 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package group + +import ( + "net/http" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" +) + +// GroupAdder adds groups to an authenticated user.Info +type GroupAdder struct { + // Authenticator is delegated to make the authentication decision + Authenticator authenticator.Request + // Groups are additional groups to add to the user.Info from a successful authentication + Groups []string +} + +// NewGroupAdder wraps a request authenticator, and adds the specified groups to the returned user when authentication succeeds +func NewGroupAdder(auth authenticator.Request, groups []string) authenticator.Request { + return &GroupAdder{auth, groups} +} + +func (g *GroupAdder) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + r, ok, err := g.Authenticator.AuthenticateRequest(req) + if err != nil || !ok { + return nil, ok, err + } + + newGroups := make([]string, 0, len(r.User.GetGroups())+len(g.Groups)) + newGroups = append(newGroups, r.User.GetGroups()...) + newGroups = append(newGroups, g.Groups...) + + ret := *r // shallow copy + ret.User = &user.DefaultInfo{ + Name: r.User.GetName(), + UID: r.User.GetUID(), + Groups: newGroups, + Extra: r.User.GetExtra(), + } + return &ret, true, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go b/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go new file mode 100644 index 00000000..51e27a67 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/group/token_group_adder.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package group
+
+import (
+	"context"
+
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// TokenGroupAdder adds groups to an authenticated user.Info
+type TokenGroupAdder struct {
+	// Authenticator is delegated to make the authentication decision
+	Authenticator authenticator.Token
+	// Groups are additional groups to add to the user.Info from a successful authentication
+	Groups []string
+}
+
+// NewTokenGroupAdder wraps a token authenticator, and adds the specified groups to the returned user when authentication succeeds
+func NewTokenGroupAdder(auth authenticator.Token, groups []string) authenticator.Token {
+	return &TokenGroupAdder{auth, groups}
+}
+
+func (g *TokenGroupAdder) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) {
+	r, ok, err := g.Authenticator.AuthenticateToken(ctx, token)
+	if err != nil || !ok {
+		return nil, ok, err
+	}
+
+	newGroups := make([]string, 0, len(r.User.GetGroups())+len(g.Groups))
+	newGroups = append(newGroups, r.User.GetGroups()...)
+	newGroups = append(newGroups, g.Groups...)
+
+	ret := *r // shallow copy
+	ret.User = &user.DefaultInfo{
+		Name:   r.User.GetName(),
+		UID:    r.User.GetUID(),
+		Groups: newGroups,
+		Extra:  r.User.GetExtra(),
+	}
+	return &ret, true, nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go
new file mode 100644
index 00000000..8a881043
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package anonymous
+
+import (
+	"net/http"
+
+	"k8s.io/apiserver/pkg/apis/apiserver"
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+const (
+	anonymousUser        = user.Anonymous
+	unauthenticatedGroup = user.AllUnauthenticated
+)
+
+type Authenticator struct {
+	allowedPaths map[string]bool
+}
+
+func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) {
+	if len(a.allowedPaths) > 0 && !a.allowedPaths[req.URL.Path] {
+		return nil, false, nil
+	}
+
+	auds, _ := authenticator.AudiencesFrom(req.Context())
+	return &authenticator.Response{
+		User: &user.DefaultInfo{
+			Name:   anonymousUser,
+			Groups: []string{unauthenticatedGroup},
+		},
+		Audiences: auds,
+	}, true, nil
+}
+
+// NewAuthenticator returns a new anonymous authenticator.
+// When conditions is empty, all requests are authenticated as anonymous.
+// When conditions are non-empty, only requests that match at least one condition
+// are authenticated as anonymous.
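+//
+// An illustrative sketch (the paths are hypothetical):
+//
+//	anon := NewAuthenticator([]apiserver.AnonymousAuthCondition{{Path: "/healthz"}, {Path: "/livez"}})
+//	resp, ok, _ := anon.AuthenticateRequest(req) // ok only when req.URL.Path is one of the listed paths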
+func NewAuthenticator(conditions []apiserver.AnonymousAuthCondition) authenticator.Request { + allowedPaths := make(map[string]bool) + for _, c := range conditions { + allowedPaths[c.Path] = true + } + + return &Authenticator{allowedPaths: allowedPaths} +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go new file mode 100644 index 00000000..68c6ad91 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go @@ -0,0 +1,76 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bearertoken + +import ( + "errors" + "net/http" + "strings" + + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/warning" +) + +const ( + invalidTokenWithSpaceWarning = "the provided Authorization header contains extra space before the bearer token, and is ignored" +) + +type Authenticator struct { + auth authenticator.Token +} + +func New(auth authenticator.Token) *Authenticator { + return &Authenticator{auth} +} + +var invalidToken = errors.New("invalid bearer token") + +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + auth := strings.TrimSpace(req.Header.Get("Authorization")) + if auth == "" { + return nil, false, nil + } + parts := strings.SplitN(auth, " ", 3) + if len(parts) < 2 || strings.ToLower(parts[0]) != "bearer" { + return nil, false, nil + } + + token := parts[1] + + // Empty bearer tokens aren't valid + if len(token) == 0 { + // The space before the token case + if len(parts) == 3 { + warning.AddWarning(req.Context(), "", invalidTokenWithSpaceWarning) + } + return nil, false, nil + } + + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) + // if we authenticated successfully, go ahead and remove the bearer token so that no one + // is ever tempted to use it inside of the API server + if ok { + req.Header.Del("Authorization") + } + + // If the token authenticator didn't error, provide a default error + if !ok && err == nil { + err = invalidToken + } + + return resp, ok, err +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go new file mode 100644 index 00000000..18261639 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go @@ -0,0 +1,204 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package headerrequest
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	x509request "k8s.io/apiserver/pkg/authentication/request/x509"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places.
+type StringSliceProvider interface {
+	// Value returns the current string slice. Callers should never mutate the returned value.
+	Value() []string
+}
+
+// StringSliceProviderFunc is a function that matches the StringSliceProvider interface
+type StringSliceProviderFunc func() []string
+
+// Value returns the current string slice. Callers should never mutate the returned value.
+func (d StringSliceProviderFunc) Value() []string {
+	return d()
+}
+
+// StaticStringSlice is a StringSliceProvider that returns a fixed value
+type StaticStringSlice []string
+
+// Value returns the current string slice. Callers should never mutate the returned value.
+func (s StaticStringSlice) Value() []string {
+	return s
+}
+
+type requestHeaderAuthRequestHandler struct {
+	// nameHeaders are the headers to check (in order, case-insensitively) for an identity. The first header with a value wins.
+	nameHeaders StringSliceProvider
+
+	// groupHeaders are the headers to check (case-insensitively) for group membership. All values of all headers will be added.
+	groupHeaders StringSliceProvider
+
+	// extraHeaderPrefixes are the header prefixes to check (case-insensitively) for filling in
+	// the user.Info.Extra. All values of all matching headers will be added.
+	extraHeaderPrefixes StringSliceProvider
+}
+
+func New(nameHeaders, groupHeaders, extraHeaderPrefixes []string) (authenticator.Request, error) {
+	trimmedNameHeaders, err := trimHeaders(nameHeaders...)
+	if err != nil {
+		return nil, err
+	}
+	trimmedGroupHeaders, err := trimHeaders(groupHeaders...)
+	if err != nil {
+		return nil, err
+	}
+	trimmedExtraHeaderPrefixes, err := trimHeaders(extraHeaderPrefixes...)
+ if err != nil { + return nil, err + } + + return NewDynamic( + StaticStringSlice(trimmedNameHeaders), + StaticStringSlice(trimmedGroupHeaders), + StaticStringSlice(trimmedExtraHeaderPrefixes), + ), nil +} + +func NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { + return &requestHeaderAuthRequestHandler{ + nameHeaders: nameHeaders, + groupHeaders: groupHeaders, + extraHeaderPrefixes: extraHeaderPrefixes, + } +} + +func trimHeaders(headerNames ...string) ([]string, error) { + ret := []string{} + for _, headerName := range headerNames { + trimmedHeader := strings.TrimSpace(headerName) + if len(trimmedHeader) == 0 { + return nil, fmt.Errorf("empty header %q", headerName) + } + ret = append(ret, trimmedHeader) + } + + return ret, nil +} + +func NewDynamicVerifyOptionsSecure(verifyOptionFn x509request.VerifyOptionFunc, proxyClientNames, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) authenticator.Request { + headerAuthenticator := NewDynamic(nameHeaders, groupHeaders, extraHeaderPrefixes) + + return x509request.NewDynamicCAVerifier(verifyOptionFn, headerAuthenticator, proxyClientNames) +} + +func (a *requestHeaderAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + name := headerValue(req.Header, a.nameHeaders.Value()) + if len(name) == 0 { + return nil, false, nil + } + groups := allHeaderValues(req.Header, a.groupHeaders.Value()) + extra := newExtra(req.Header, a.extraHeaderPrefixes.Value()) + + // clear headers used for authentication + ClearAuthenticationHeaders(req.Header, a.nameHeaders, a.groupHeaders, a.extraHeaderPrefixes) + + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: name, + Groups: groups, + Extra: extra, + }, + }, true, nil +} + +func ClearAuthenticationHeaders(h http.Header, nameHeaders, groupHeaders, extraHeaderPrefixes StringSliceProvider) { + for _, headerName := range nameHeaders.Value() { + h.Del(headerName) + } + for _, headerName := range groupHeaders.Value() { + h.Del(headerName) + } + for _, prefix := range extraHeaderPrefixes.Value() { + for k := range h { + if hasPrefixIgnoreCase(k, prefix) { + delete(h, k) // we have the raw key so avoid relying on canonicalization + } + } + } +} + +func hasPrefixIgnoreCase(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[:len(prefix)], prefix) +} + +func headerValue(h http.Header, headerNames []string) string { + for _, headerName := range headerNames { + headerValue := h.Get(headerName) + if len(headerValue) > 0 { + return headerValue + } + } + return "" +} + +func allHeaderValues(h http.Header, headerNames []string) []string { + ret := []string{} + for _, headerName := range headerNames { + headerKey := http.CanonicalHeaderKey(headerName) + values, ok := h[headerKey] + if !ok { + continue + } + + for _, headerValue := range values { + if len(headerValue) > 0 { + ret = append(ret, headerValue) + } + } + } + return ret +} + +func unescapeExtraKey(encodedKey string) string { + key, err := url.PathUnescape(encodedKey) // Decode %-encoded bytes. + if err != nil { + return encodedKey // Always record extra strings, even if malformed/unencoded. 
+	}
+	return key
+}
+
+func newExtra(h http.Header, headerPrefixes []string) map[string][]string {
+	ret := map[string][]string{}
+
+	// we have to iterate over prefixes first in order to have proper ordering inside the value slices
+	for _, prefix := range headerPrefixes {
+		for headerName, vv := range h {
+			if !hasPrefixIgnoreCase(headerName, prefix) {
+				continue
+			}
+
+			extraKey := unescapeExtraKey(strings.ToLower(headerName[len(prefix):]))
+			ret[extraKey] = append(ret[extraKey], vv...)
+		}
+	}
+
+	return ret
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go
new file mode 100644
index 00000000..dc844ee7
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_controller.go
@@ -0,0 +1,340 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package headerrequest
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	coreinformers "k8s.io/client-go/informers/core/v1"
+	"k8s.io/client-go/kubernetes"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+const (
+	authenticationRoleName = "extension-apiserver-authentication-reader"
+)
+
+// RequestHeaderAuthRequestProvider is a provider that knows how to dynamically fill parts of a RequestHeaderConfig struct
+type RequestHeaderAuthRequestProvider interface {
+	UsernameHeaders() []string
+	GroupHeaders() []string
+	ExtraHeaderPrefixes() []string
+	AllowedClientNames() []string
+}
+
+var _ RequestHeaderAuthRequestProvider = &RequestHeaderAuthRequestController{}
+
+type requestHeaderBundle struct {
+	UsernameHeaders     []string
+	GroupHeaders        []string
+	ExtraHeaderPrefixes []string
+	AllowedClientNames  []string
+}
+
+// RequestHeaderAuthRequestController is a controller that exposes a set of methods for dynamically filling parts of a RequestHeaderConfig struct.
+// The methods are sourced from the config map which is being monitored by this controller.
+// The controller is primed from the server at construction time for components that don't want to dynamically react to changes
+// in the config map.
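+//
+// A construction sketch (the configmap coordinates and key names shown are the conventional
+// kube-system ones, assumed here for illustration rather than taken from this patch):
+//
+//	c := NewRequestHeaderAuthRequestController(
+//		"extension-apiserver-authentication", "kube-system", client,
+//		"requestheader-username-headers", "requestheader-group-headers",
+//		"requestheader-extra-headers-prefix", "requestheader-allowed-names",
+//	)
+//	go c.Run(ctx, 1)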
+type RequestHeaderAuthRequestController struct {
+	name string
+
+	configmapName      string
+	configmapNamespace string
+
+	client                  kubernetes.Interface
+	configmapLister         corev1listers.ConfigMapNamespaceLister
+	configmapInformer       cache.SharedIndexInformer
+	configmapInformerSynced cache.InformerSynced
+
+	queue workqueue.TypedRateLimitingInterface[string]
+
+	// exportedRequestHeaderBundle is a requestHeaderBundle that contains the last read, non-zero length content of the configmap
+	exportedRequestHeaderBundle atomic.Value
+
+	usernameHeadersKey     string
+	groupHeadersKey        string
+	extraHeaderPrefixesKey string
+	allowedClientNamesKey  string
+}
+
+// NewRequestHeaderAuthRequestController creates a new controller that implements RequestHeaderAuthRequestProvider
+func NewRequestHeaderAuthRequestController(
+	cmName string,
+	cmNamespace string,
+	client kubernetes.Interface,
+	usernameHeadersKey, groupHeadersKey, extraHeaderPrefixesKey, allowedClientNamesKey string) *RequestHeaderAuthRequestController {
+	c := &RequestHeaderAuthRequestController{
+		name: "RequestHeaderAuthRequestController",
+
+		client: client,
+
+		configmapName:      cmName,
+		configmapNamespace: cmNamespace,
+
+		usernameHeadersKey:     usernameHeadersKey,
+		groupHeadersKey:        groupHeadersKey,
+		extraHeaderPrefixesKey: extraHeaderPrefixesKey,
+		allowedClientNamesKey:  allowedClientNamesKey,
+
+		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
+			workqueue.DefaultTypedControllerRateLimiter[string](),
+			workqueue.TypedRateLimitingQueueConfig[string]{Name: "RequestHeaderAuthRequestController"},
+		),
+	}
+
+	// we construct our own informer because we need such a small subset of the information available. Just one namespace.
+	c.configmapInformer = coreinformers.NewFilteredConfigMapInformer(client, c.configmapNamespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(listOptions *metav1.ListOptions) {
+		listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", c.configmapName).String()
+	})
+
+	c.configmapInformer.AddEventHandler(cache.FilteringResourceEventHandler{
+		FilterFunc: func(obj interface{}) bool {
+			if cast, ok := obj.(*corev1.ConfigMap); ok {
+				return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace
+			}
+			if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+				if cast, ok := tombstone.Obj.(*corev1.ConfigMap); ok {
+					return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace
+				}
+			}
+			return true // always return true just in case. The checks are fairly cheap
+		},
+		Handler: cache.ResourceEventHandlerFuncs{
+			// we have a filter, so any time we're called, we may as well queue. We only ever check one configmap
+			// so we don't have to be choosy about our key.
+			AddFunc: func(obj interface{}) {
+				c.queue.Add(c.keyFn())
+			},
+			UpdateFunc: func(oldObj, newObj interface{}) {
+				c.queue.Add(c.keyFn())
+			},
+			DeleteFunc: func(obj interface{}) {
+				c.queue.Add(c.keyFn())
+			},
+		},
+	})
+
+	c.configmapLister = corev1listers.NewConfigMapLister(c.configmapInformer.GetIndexer()).ConfigMaps(c.configmapNamespace)
+	c.configmapInformerSynced = c.configmapInformer.HasSynced
+
+	return c
+}
+
+func (c *RequestHeaderAuthRequestController) UsernameHeaders() []string {
+	return c.loadRequestHeaderFor(c.usernameHeadersKey)
+}
+
+func (c *RequestHeaderAuthRequestController) GroupHeaders() []string {
+	return c.loadRequestHeaderFor(c.groupHeadersKey)
+}
+
+func (c *RequestHeaderAuthRequestController) ExtraHeaderPrefixes() []string {
+	return c.loadRequestHeaderFor(c.extraHeaderPrefixesKey)
+}
+
+func (c *RequestHeaderAuthRequestController) AllowedClientNames() []string {
+	return c.loadRequestHeaderFor(c.allowedClientNamesKey)
+}
+
+// Run starts the RequestHeaderAuthRequestController and blocks until the context is canceled.
+func (c *RequestHeaderAuthRequestController) Run(ctx context.Context, workers int) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting %s", c.name)
+	defer klog.Infof("Shutting down %s", c.name)
+
+	go c.configmapInformer.Run(ctx.Done())
+
+	// wait for caches to fill before starting your work
+	if !cache.WaitForNamedCacheSync(c.name, ctx.Done(), c.configmapInformerSynced) {
+		return
+	}
+
+	// doesn't matter what workers say, only start one.
+	go wait.Until(c.runWorker, time.Second, ctx.Done())
+
+	<-ctx.Done()
+}
+
+// RunOnce runs a single sync loop
+func (c *RequestHeaderAuthRequestController) RunOnce(ctx context.Context) error {
+	configMap, err := c.client.CoreV1().ConfigMaps(c.configmapNamespace).Get(ctx, c.configmapName, metav1.GetOptions{})
+	switch {
+	case errors.IsNotFound(err):
+		// ignore, authConfigMap is nil now
+		return nil
+	case errors.IsForbidden(err):
+		klog.Warningf("Unable to get configmap/%s in %s. Usually fixed by "+
+			"'kubectl create rolebinding -n %s ROLEBINDING_NAME --role=%s --serviceaccount=YOUR_NS:YOUR_SA'",
+			c.configmapName, c.configmapNamespace, c.configmapNamespace, authenticationRoleName)
+		return err
+	case err != nil:
+		return err
+	}
+	return c.syncConfigMap(configMap)
+}
+
+func (c *RequestHeaderAuthRequestController) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *RequestHeaderAuthRequestController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.sync()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// sync reads the config and propagates the changes to exportedRequestHeaderBundle
+// which is exposed by the set of methods that are used to fill the RequestHeaderConfig struct
+func (c *RequestHeaderAuthRequestController) sync() error {
+	configMap, err := c.configmapLister.Get(c.configmapName)
+	if err != nil {
+		return err
+	}
+	return c.syncConfigMap(configMap)
+}
+
+func (c *RequestHeaderAuthRequestController) syncConfigMap(configMap *corev1.ConfigMap) error {
+	hasChanged, newRequestHeaderBundle, err := c.hasRequestHeaderBundleChanged(configMap)
+	if err != nil {
+		return err
+	}
+	if hasChanged {
+		c.exportedRequestHeaderBundle.Store(newRequestHeaderBundle)
+		klog.V(2).Infof("Loaded new request header values for %v", c.name)
+	}
+	return nil
+}
+
+func (c *RequestHeaderAuthRequestController) hasRequestHeaderBundleChanged(cm *corev1.ConfigMap) (bool, *requestHeaderBundle, error) {
+	currentHeadersBundle, err := c.getRequestHeaderBundleFromConfigMap(cm)
+	if err != nil {
+		return false, nil, err
+	}
+
+	rawHeaderBundle := c.exportedRequestHeaderBundle.Load()
+	if rawHeaderBundle == nil {
+		return true, currentHeadersBundle, nil
+	}
+
+	// check to see if we have a change. If the values are the same, do nothing.
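+	// exportedRequestHeaderBundle only ever stores a *requestHeaderBundle (see syncConfigMap
+	// above), so the type assertion below should always succeed; the ok-check is defensive.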
+	loadedHeadersBundle, ok := rawHeaderBundle.(*requestHeaderBundle)
+	if !ok {
+		return true, currentHeadersBundle, nil
+	}
+
+	if !equality.Semantic.DeepEqual(loadedHeadersBundle, currentHeadersBundle) {
+		return true, currentHeadersBundle, nil
+	}
+	return false, nil, nil
+}
+
+func (c *RequestHeaderAuthRequestController) getRequestHeaderBundleFromConfigMap(cm *corev1.ConfigMap) (*requestHeaderBundle, error) {
+	usernameHeaderCurrentValue, err := deserializeStrings(cm.Data[c.usernameHeadersKey])
+	if err != nil {
+		return nil, err
+	}
+
+	groupHeadersCurrentValue, err := deserializeStrings(cm.Data[c.groupHeadersKey])
+	if err != nil {
+		return nil, err
+	}
+
+	extraHeaderPrefixesCurrentValue, err := deserializeStrings(cm.Data[c.extraHeaderPrefixesKey])
+	if err != nil {
+		return nil, err
+	}
+
+	allowedClientNamesCurrentValue, err := deserializeStrings(cm.Data[c.allowedClientNamesKey])
+	if err != nil {
+		return nil, err
+	}
+
+	return &requestHeaderBundle{
+		UsernameHeaders:     usernameHeaderCurrentValue,
+		GroupHeaders:        groupHeadersCurrentValue,
+		ExtraHeaderPrefixes: extraHeaderPrefixesCurrentValue,
+		AllowedClientNames:  allowedClientNamesCurrentValue,
+	}, nil
+}
+
+func (c *RequestHeaderAuthRequestController) loadRequestHeaderFor(key string) []string {
+	rawHeaderBundle := c.exportedRequestHeaderBundle.Load()
+	if rawHeaderBundle == nil {
+		return nil // this can happen if we've been unable to load data from the apiserver for some reason
+	}
+	headerBundle := rawHeaderBundle.(*requestHeaderBundle)
+
+	switch key {
+	case c.usernameHeadersKey:
+		return headerBundle.UsernameHeaders
+	case c.groupHeadersKey:
+		return headerBundle.GroupHeaders
+	case c.extraHeaderPrefixesKey:
+		return headerBundle.ExtraHeaderPrefixes
+	case c.allowedClientNamesKey:
+		return headerBundle.AllowedClientNames
+	default:
+		return nil
+	}
+}
+
+func (c *RequestHeaderAuthRequestController) keyFn() string {
+	// this format matches DeletionHandlingMetaNamespaceKeyFunc for our single key
+	return c.configmapNamespace + "/" + c.configmapName
+}
+
+func deserializeStrings(in string) ([]string, error) {
+	if len(in) == 0 {
+		return nil, nil
+	}
+	var ret []string
+	if err := json.Unmarshal([]byte(in), &ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go b/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go
new file mode 100644
index 00000000..512063be
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package union
+
+import (
+	"net/http"
+
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+)
+
+// unionAuthRequestHandler authenticates requests using a chain of authenticator.Requests
+type unionAuthRequestHandler struct {
+	// Handlers is a chain of request authenticators to delegate to
+	Handlers []authenticator.Request
+	// FailOnError determines whether an error short-circuits the chain
+	FailOnError bool
+}
+
+// New returns a request authenticator that validates credentials using a chain of authenticator.Request objects.
+// The entire chain is tried until one succeeds. If all fail, an aggregate error is returned.
+func New(authRequestHandlers ...authenticator.Request) authenticator.Request {
+	if len(authRequestHandlers) == 1 {
+		return authRequestHandlers[0]
+	}
+	return &unionAuthRequestHandler{Handlers: authRequestHandlers, FailOnError: false}
+}
+
+// NewFailOnError returns a request authenticator that validates credentials using a chain of authenticator.Request objects.
+// The first error short-circuits the chain.
+func NewFailOnError(authRequestHandlers ...authenticator.Request) authenticator.Request {
+	if len(authRequestHandlers) == 1 {
+		return authRequestHandlers[0]
+	}
+	return &unionAuthRequestHandler{Handlers: authRequestHandlers, FailOnError: true}
+}
+
+// AuthenticateRequest authenticates the request using a chain of authenticator.Request objects.
+func (authHandler *unionAuthRequestHandler) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) {
+	var errlist []error
+	for _, currAuthRequestHandler := range authHandler.Handlers {
+		resp, ok, err := currAuthRequestHandler.AuthenticateRequest(req)
+		if err != nil {
+			if authHandler.FailOnError {
+				return resp, ok, err
+			}
+			errlist = append(errlist, err)
+			continue
+		}
+
+		if ok {
+			return resp, ok, err
+		}
+	}
+
+	return nil, false, utilerrors.NewAggregate(errlist)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go
new file mode 100644
index 00000000..ee8c89f5
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/request/websocket/protocol.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package websocket
+
+import (
+	"encoding/base64"
+	"errors"
+	"net/http"
+	"net/textproto"
+	"strings"
+	"unicode/utf8"
+
+	"k8s.io/apimachinery/pkg/util/httpstream/wsstream"
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+)
+
+const bearerProtocolPrefix = "base64url.bearer.authorization.k8s.io."
+
+var protocolHeader = textproto.CanonicalMIMEHeaderKey("Sec-WebSocket-Protocol")
+
+var errInvalidToken = errors.New("invalid bearer token")
+
+// ProtocolAuthenticator allows a websocket connection to provide a bearer token as a subprotocol
+// in the format "base64url.bearer.authorization.k8s.io.<base64url-encoded-bearer-token>".
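+//
+// A client-side sketch of offering such a token (the channel subprotocol name is
+// illustrative; at least one non-token subprotocol must be offered, as enforced below):
+//
+//	protocols := []string{
+//		"base64url.bearer.authorization.k8s.io." + base64.RawURLEncoding.EncodeToString([]byte(token)),
+//		"v4.channel.k8s.io",
+//	}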
+type ProtocolAuthenticator struct { + // auth is the token authenticator to use to validate the token + auth authenticator.Token +} + +func NewProtocolAuthenticator(auth authenticator.Token) *ProtocolAuthenticator { + return &ProtocolAuthenticator{auth} +} + +func (a *ProtocolAuthenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + // Only accept websocket connections + if !wsstream.IsWebSocketRequest(req) { + return nil, false, nil + } + + token := "" + sawTokenProtocol := false + filteredProtocols := []string{} + for _, protocolHeader := range req.Header[protocolHeader] { + for _, protocol := range strings.Split(protocolHeader, ",") { + protocol = strings.TrimSpace(protocol) + + if !strings.HasPrefix(protocol, bearerProtocolPrefix) { + filteredProtocols = append(filteredProtocols, protocol) + continue + } + + if sawTokenProtocol { + return nil, false, errors.New("multiple base64.bearer.authorization tokens specified") + } + sawTokenProtocol = true + + encodedToken := strings.TrimPrefix(protocol, bearerProtocolPrefix) + decodedToken, err := base64.RawURLEncoding.DecodeString(encodedToken) + if err != nil { + return nil, false, errors.New("invalid base64.bearer.authorization token encoding") + } + if !utf8.Valid(decodedToken) { + return nil, false, errors.New("invalid base64.bearer.authorization token") + } + token = string(decodedToken) + } + } + + // Must pass at least one other subprotocol so that we can remove the one containing the bearer token, + // and there is at least one to echo back to the client + if len(token) > 0 && len(filteredProtocols) == 0 { + return nil, false, errors.New("missing additional subprotocol") + } + + if len(token) == 0 { + return nil, false, nil + } + + resp, ok, err := a.auth.AuthenticateToken(req.Context(), token) + + // on success, remove the protocol with the token + if ok { + // https://tools.ietf.org/html/rfc6455#section-11.3.4 indicates the Sec-WebSocket-Protocol header may appear multiple times + // in a request, and is logically the same as a single Sec-WebSocket-Protocol header field that contains all values + req.Header.Set(protocolHeader, strings.Join(filteredProtocols, ",")) + } + + // If the token authenticator didn't error, provide a default error + if !ok && err == nil { + err = errInvalidToken + } + + return resp, ok, err +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS new file mode 100644 index 00000000..3c3b94c5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-auth-certificates-approvers +reviewers: + - sig-auth-certificates-reviewers +labels: + - sig/auth diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go new file mode 100644 index 00000000..8f3d36b6 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package x509 provides a request authenticator that validates and
+// extracts user information from client certificates
+package x509 // import "k8s.io/apiserver/pkg/authentication/request/x509"
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go
new file mode 100644
index 00000000..462eb4cc
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/verify_options.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package x509
+
+import (
+	"crypto/x509"
+	"fmt"
+
+	"k8s.io/client-go/util/cert"
+)
+
+// StaticVerifierFn is a VerifyOptionFunc that always returns the same value. This allows verify options that cannot change.
+func StaticVerifierFn(opts x509.VerifyOptions) VerifyOptionFunc {
+	return func() (x509.VerifyOptions, bool) {
+		return opts, true
+	}
+}
+
+// NewStaticVerifierFromFile creates a new verification func from a file. It reads the file
+// contents once, at construction time, and returns an error if they cannot be loaded; later
+// changes to the file are not observed.
+// It will return a nil function if you pass an empty CA file.
+func NewStaticVerifierFromFile(clientCA string) (VerifyOptionFunc, error) {
+	if len(clientCA) == 0 {
+		return nil, nil
+	}
+
+	// Wrap with an x509 verifier
+	var err error
+	opts := DefaultVerifyOptions()
+	opts.Roots, err = cert.NewPool(clientCA)
+	if err != nil {
+		return nil, fmt.Errorf("error loading certs from %s: %v", clientCA, err)
+	}
+
+	return StaticVerifierFn(opts), nil
+}
+
+// StringSliceProvider is a way to get a string slice value. It is heavily used for authentication headers among other places.
+type StringSliceProvider interface {
+	// Value returns the current string slice. Callers should never mutate the returned value.
+	Value() []string
+}
+
+// StringSliceProviderFunc is a function that matches the StringSliceProvider interface
+type StringSliceProviderFunc func() []string
+
+// Value returns the current string slice. Callers should never mutate the returned value.
+func (d StringSliceProviderFunc) Value() []string {
+	return d()
+}
+
+// StaticStringSlice is a StringSliceProvider that returns a fixed value
+type StaticStringSlice []string
+
+// Value returns the current string slice. Callers should never mutate the returned value.
+func (s StaticStringSlice) Value() []string { + return s +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go new file mode 100644 index 00000000..d67c5354 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go @@ -0,0 +1,285 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package x509 + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/hex" + "fmt" + "net/http" + "strings" + "time" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +/* + * By default, the following metric is defined as falling under + * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/1209-metrics-stability/kubernetes-control-plane-metrics-stability.md#stability-classes) + * + * Promoting the stability level of the metric is a responsibility of the component owner, since it + * involves explicitly acknowledging support for the metric across multiple releases, in accordance with + * the metric stability policy. + */ +var clientCertificateExpirationHistogram = metrics.NewHistogram( + &metrics.HistogramOpts{ + Namespace: "apiserver", + Subsystem: "client", + Name: "certificate_expiration_seconds", + Help: "Distribution of the remaining lifetime on the certificate used to authenticate a request.", + Buckets: []float64{ + 0, + 1800, // 30 minutes + 3600, // 1 hour + 7200, // 2 hours + 21600, // 6 hours + 43200, // 12 hours + 86400, // 1 day + 172800, // 2 days + 345600, // 4 days + 604800, // 1 week + 2592000, // 1 month + 7776000, // 3 months + 15552000, // 6 months + 31104000, // 1 year + }, + StabilityLevel: metrics.ALPHA, + }, +) + +func init() { + legacyregistry.MustRegister(clientCertificateExpirationHistogram) +} + +// UserConversion defines an interface for extracting user info from a client certificate chain +type UserConversion interface { + User(chain []*x509.Certificate) (*authenticator.Response, bool, error) +} + +// UserConversionFunc is a function that implements the UserConversion interface. 
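+// For instance (an illustrative sketch, not part of the upstream file), a conversion keyed
+// on the certificate's email SAN rather than its CommonName:
+//
+//	var emailUser = UserConversionFunc(func(chain []*x509.Certificate) (*authenticator.Response, bool, error) {
+//		if len(chain[0].EmailAddresses) == 0 {
+//			return nil, false, nil
+//		}
+//		return &authenticator.Response{User: &user.DefaultInfo{Name: chain[0].EmailAddresses[0]}}, true, nil
+//	})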
+type UserConversionFunc func(chain []*x509.Certificate) (*authenticator.Response, bool, error) + +// User implements x509.UserConversion +func (f UserConversionFunc) User(chain []*x509.Certificate) (*authenticator.Response, bool, error) { + return f(chain) +} + +func columnSeparatedHex(d []byte) string { + h := strings.ToUpper(hex.EncodeToString(d)) + var sb strings.Builder + for i, r := range h { + sb.WriteRune(r) + if i%2 == 1 && i != len(h)-1 { + sb.WriteRune(':') + } + } + return sb.String() +} + +func certificateIdentifier(c *x509.Certificate) string { + return fmt.Sprintf( + "SN=%d, SKID=%s, AKID=%s", + c.SerialNumber, + columnSeparatedHex(c.SubjectKeyId), + columnSeparatedHex(c.AuthorityKeyId), + ) +} + +// VerifyOptionFunc is function which provides a shallow copy of the VerifyOptions to the authenticator. This allows +// for cases where the options (particularly the CAs) can change. If the bool is false, then the returned VerifyOptions +// are ignored and the authenticator will express "no opinion". This allows a clear signal for cases where a CertPool +// is eventually expected, but not currently present. +type VerifyOptionFunc func() (x509.VerifyOptions, bool) + +// Authenticator implements request.Authenticator by extracting user info from verified client certificates +type Authenticator struct { + verifyOptionsFn VerifyOptionFunc + user UserConversion +} + +// New returns a request.Authenticator that verifies client certificates using the provided +// VerifyOptions, and converts valid certificate chains into user.Info using the provided UserConversion +func New(opts x509.VerifyOptions, user UserConversion) *Authenticator { + return NewDynamic(StaticVerifierFn(opts), user) +} + +// NewDynamic returns a request.Authenticator that verifies client certificates using the provided +// VerifyOptionFunc (which may be dynamic), and converts valid certificate chains into user.Info using the provided UserConversion +func NewDynamic(verifyOptionsFn VerifyOptionFunc, user UserConversion) *Authenticator { + return &Authenticator{verifyOptionsFn, user} +} + +// AuthenticateRequest authenticates the request using presented client certificates +func (a *Authenticator) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) { + if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 { + return nil, false, nil + } + + // Use intermediates, if provided + optsCopy, ok := a.verifyOptionsFn() + // if there are intentionally no verify options, then we cannot authenticate this request + if !ok { + return nil, false, nil + } + if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 { + optsCopy.Intermediates = x509.NewCertPool() + for _, intermediate := range req.TLS.PeerCertificates[1:] { + optsCopy.Intermediates.AddCert(intermediate) + } + } + + /* + kubernetes mutual (2-way) x509 between client and apiserver: + + 1. apiserver sending its apiserver certificate along with its publickey to client + 2. client verifies the apiserver certificate sent against its cluster certificate authority data + 3. client sending its client certificate along with its public key to the apiserver + >4. 
apiserver verifies the client certificate sent against its cluster certificate authority data + + description: + here, with this function, + client certificate and pub key sent during the handshake process + are verified by apiserver against its cluster certificate authority data + + normal args related to this stage: + --client-ca-file string If set, any request presenting a client certificate signed by + one of the authorities in the client-ca-file is authenticated with an identity + corresponding to the CommonName of the client certificate. + + (retrievable from "kube-apiserver --help" command) + (suggested by @deads2k) + + see also: + - for the step 1, see: staging/src/k8s.io/apiserver/pkg/server/options/serving.go + - for the step 2, see: staging/src/k8s.io/client-go/transport/transport.go + - for the step 3, see: staging/src/k8s.io/client-go/transport/transport.go + */ + + remaining := req.TLS.PeerCertificates[0].NotAfter.Sub(time.Now()) + clientCertificateExpirationHistogram.WithContext(req.Context()).Observe(remaining.Seconds()) + chains, err := req.TLS.PeerCertificates[0].Verify(optsCopy) + if err != nil { + return nil, false, fmt.Errorf( + "verifying certificate %s failed: %w", + certificateIdentifier(req.TLS.PeerCertificates[0]), + err, + ) + } + + var errlist []error + for _, chain := range chains { + user, ok, err := a.user.User(chain) + if err != nil { + errlist = append(errlist, err) + continue + } + + if ok { + return user, ok, err + } + } + return nil, false, utilerrors.NewAggregate(errlist) +} + +// Verifier implements request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth +type Verifier struct { + verifyOptionsFn VerifyOptionFunc + auth authenticator.Request + + // allowedCommonNames contains the common names which a verified certificate is allowed to have. + // If empty, all verified certificates are allowed. 
+	allowedCommonNames StringSliceProvider
+}
+
+// NewVerifier creates a request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth
+func NewVerifier(opts x509.VerifyOptions, auth authenticator.Request, allowedCommonNames sets.String) authenticator.Request {
+	return NewDynamicCAVerifier(StaticVerifierFn(opts), auth, StaticStringSlice(allowedCommonNames.List()))
+}
+
+// NewDynamicCAVerifier creates a request.Authenticator by verifying a client cert on the request, then delegating to the wrapped auth
+func NewDynamicCAVerifier(verifyOptionsFn VerifyOptionFunc, auth authenticator.Request, allowedCommonNames StringSliceProvider) authenticator.Request {
+	return &Verifier{verifyOptionsFn, auth, allowedCommonNames}
+}
+
+// AuthenticateRequest verifies the presented client certificate, then delegates to the wrapped auth
+func (a *Verifier) AuthenticateRequest(req *http.Request) (*authenticator.Response, bool, error) {
+	if req.TLS == nil || len(req.TLS.PeerCertificates) == 0 {
+		return nil, false, nil
+	}
+
+	// Use intermediates, if provided
+	optsCopy, ok := a.verifyOptionsFn()
+	// if there are intentionally no verify options, then we cannot authenticate this request
+	if !ok {
+		return nil, false, nil
+	}
+	if optsCopy.Intermediates == nil && len(req.TLS.PeerCertificates) > 1 {
+		optsCopy.Intermediates = x509.NewCertPool()
+		for _, intermediate := range req.TLS.PeerCertificates[1:] {
+			optsCopy.Intermediates.AddCert(intermediate)
+		}
+	}
+
+	if _, err := req.TLS.PeerCertificates[0].Verify(optsCopy); err != nil {
+		return nil, false, err
+	}
+	if err := a.verifySubject(req.TLS.PeerCertificates[0].Subject); err != nil {
+		return nil, false, err
+	}
+	return a.auth.AuthenticateRequest(req)
+}
+
+func (a *Verifier) verifySubject(subject pkix.Name) error {
+	// No CN restrictions
+	if len(a.allowedCommonNames.Value()) == 0 {
+		return nil
+	}
+	// Enforce CN restrictions
+	for _, allowedCommonName := range a.allowedCommonNames.Value() {
+		if allowedCommonName == subject.CommonName {
+			return nil
+		}
+	}
+	return fmt.Errorf("x509: subject with cn=%s is not in the allowed list", subject.CommonName)
+}
+
+// DefaultVerifyOptions returns VerifyOptions that use the system root certificates, current time,
+// and requires certificates to be valid for client auth (x509.ExtKeyUsageClientAuth)
+func DefaultVerifyOptions() x509.VerifyOptions {
+	return x509.VerifyOptions{
+		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+	}
+}
+
+// CommonNameUserConversion builds user info from a certificate chain using the subject's CommonName
+var CommonNameUserConversion = UserConversionFunc(func(chain []*x509.Certificate) (*authenticator.Response, bool, error) {
+	if len(chain[0].Subject.CommonName) == 0 {
+		return nil, false, nil
+	}
+	return &authenticator.Response{
+		User: &user.DefaultInfo{
+			Name:   chain[0].Subject.CommonName,
+			Groups: chain[0].Subject.Organization,
+		},
+	}, true, nil
+})
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go
new file mode 100644
index 00000000..3e29d4e7
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package serviceaccount
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apiserver/pkg/authentication/user"
+	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
+
+	"k8s.io/klog/v2"
+)
+
+const (
+	ServiceAccountUsernamePrefix    = "system:serviceaccount:"
+	ServiceAccountUsernameSeparator = ":"
+	ServiceAccountGroupPrefix       = "system:serviceaccounts:"
+	AllServiceAccountsGroup         = "system:serviceaccounts"
+	// CredentialIDKey is the key used in a user's "extra" to specify the unique
+	// identifier for this identity document.
+	CredentialIDKey = "authentication.kubernetes.io/credential-id"
+	// IssuedCredentialIDAuditAnnotationKey is the annotation key used in the audit event that is persisted to the
+	// '/token' endpoint for service accounts.
+	// This annotation indicates the generated credential identifier for the service account token being issued.
+	// This is useful when tracing back the origin of tokens that have gone on to make requests that have persisted
+	// their credential-identifier into the audit log via the user's extra info stored on subsequent audit events.
+	IssuedCredentialIDAuditAnnotationKey = "authentication.kubernetes.io/issued-credential-id"
+	// PodNameKey is the key used in a user's "extra" to specify the pod name of
+	// the authenticating request.
+	PodNameKey = "authentication.kubernetes.io/pod-name"
+	// PodUIDKey is the key used in a user's "extra" to specify the pod UID of
+	// the authenticating request.
+	PodUIDKey = "authentication.kubernetes.io/pod-uid"
+	// NodeNameKey is the key used in a user's "extra" to specify the node name of
+	// the authenticating request.
+	NodeNameKey = "authentication.kubernetes.io/node-name"
+	// NodeUIDKey is the key used in a user's "extra" to specify the node UID of
+	// the authenticating request.
+	NodeUIDKey = "authentication.kubernetes.io/node-uid"
+)
+
+// MakeUsername generates a username from the given namespace and ServiceAccount name.
+// The resulting username can be passed to SplitUsername to extract the original namespace and ServiceAccount name.
+func MakeUsername(namespace, name string) string {
+	return ServiceAccountUsernamePrefix + namespace + ServiceAccountUsernameSeparator + name
+}
+
+// MatchesUsername checks whether the provided username matches the namespace and name without
+// allocating. Use this when checking a service account namespace and name against a known string.
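+//
+// For example (an illustrative sketch):
+//
+//	MatchesUsername("kube-system", "default", "system:serviceaccount:kube-system:default") // true
+//	MatchesUsername("kube-system", "default", "system:serviceaccount:other:default")       // false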
+func MatchesUsername(namespace, name string, username string) bool { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return false + } + username = username[len(ServiceAccountUsernamePrefix):] + + if !strings.HasPrefix(username, namespace) { + return false + } + username = username[len(namespace):] + + if !strings.HasPrefix(username, ServiceAccountUsernameSeparator) { + return false + } + username = username[len(ServiceAccountUsernameSeparator):] + + return username == name +} + +var invalidUsernameErr = fmt.Errorf("Username must be in the form %s", MakeUsername("namespace", "name")) + +// SplitUsername returns the namespace and ServiceAccount name embedded in the given username, +// or an error if the username is not a valid name produced by MakeUsername +func SplitUsername(username string) (string, string, error) { + if !strings.HasPrefix(username, ServiceAccountUsernamePrefix) { + return "", "", invalidUsernameErr + } + trimmed := strings.TrimPrefix(username, ServiceAccountUsernamePrefix) + parts := strings.Split(trimmed, ServiceAccountUsernameSeparator) + if len(parts) != 2 { + return "", "", invalidUsernameErr + } + namespace, name := parts[0], parts[1] + if len(apimachineryvalidation.ValidateNamespaceName(namespace, false)) != 0 { + return "", "", invalidUsernameErr + } + if len(apimachineryvalidation.ValidateServiceAccountName(name, false)) != 0 { + return "", "", invalidUsernameErr + } + return namespace, name, nil +} + +// MakeGroupNames generates service account group names for the given namespace +func MakeGroupNames(namespace string) []string { + return []string{ + AllServiceAccountsGroup, + MakeNamespaceGroupName(namespace), + } +} + +// MakeNamespaceGroupName returns the name of the group all service accounts in the namespace are included in +func MakeNamespaceGroupName(namespace string) string { + return ServiceAccountGroupPrefix + namespace +} + +// UserInfo returns a user.Info interface for the given namespace, service account name and UID +func UserInfo(namespace, name, uid string) user.Info { + return (&ServiceAccountInfo{ + Name: name, + Namespace: namespace, + UID: uid, + }).UserInfo() +} + +type ServiceAccountInfo struct { + Name, Namespace, UID string + PodName, PodUID string + CredentialID string + NodeName, NodeUID string +} + +func (sa *ServiceAccountInfo) UserInfo() user.Info { + info := &user.DefaultInfo{ + Name: MakeUsername(sa.Namespace, sa.Name), + UID: sa.UID, + Groups: MakeGroupNames(sa.Namespace), + } + + if sa.PodName != "" && sa.PodUID != "" { + if info.Extra == nil { + info.Extra = make(map[string][]string) + } + info.Extra[PodNameKey] = []string{sa.PodName} + info.Extra[PodUIDKey] = []string{sa.PodUID} + } + if sa.CredentialID != "" { + if info.Extra == nil { + info.Extra = make(map[string][]string) + } + info.Extra[CredentialIDKey] = []string{sa.CredentialID} + } + if sa.NodeName != "" { + if info.Extra == nil { + info.Extra = make(map[string][]string) + } + info.Extra[NodeNameKey] = []string{sa.NodeName} + // node UID is optional and will only be set if the node name is set + if sa.NodeUID != "" { + info.Extra[NodeUIDKey] = []string{sa.NodeUID} + } + } + + return info +} + +// CredentialIDForJTI converts a given JTI string into a credential identifier for use in a +// users 'extra' info. 
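+// For example (an illustrative sketch), CredentialIDForJTI("9a8b7c") returns "JTI=9a8b7c",
+// and an empty JTI yields "".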
+func CredentialIDForJTI(jti string) string {
+	if len(jti) == 0 {
+		return ""
+	}
+	return "JTI=" + jti
+}
+
+// IsServiceAccountToken returns true if the secret is a valid API token for the service account
+func IsServiceAccountToken(secret *v1.Secret, sa *v1.ServiceAccount) bool {
+	if secret.Type != v1.SecretTypeServiceAccountToken {
+		return false
+	}
+
+	name := secret.Annotations[v1.ServiceAccountNameKey]
+	uid := secret.Annotations[v1.ServiceAccountUIDKey]
+	if name != sa.Name {
+		// Name must match
+		return false
+	}
+	if len(uid) > 0 && uid != string(sa.UID) {
+		// If UID is specified, it must match
+		return false
+	}
+
+	return true
+}
+
+func GetOrCreateServiceAccount(coreClient v1core.CoreV1Interface, namespace, name string) (*v1.ServiceAccount, error) {
+	sa, err := coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+	if err == nil {
+		return sa, nil
+	}
+	if !apierrors.IsNotFound(err) {
+		return nil, err
+	}
+
+	// Create the namespace if we can't verify it exists.
+	// Tolerate errors, since we don't know whether this component has namespace creation permissions.
+	if _, err := coreClient.Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); apierrors.IsNotFound(err) {
+		if _, err = coreClient.Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
+			klog.Warningf("failed to create non-existent namespace %s: %v", namespace, err)
+		}
+	}
+
+	// Create the service account
+	sa, err = coreClient.ServiceAccounts(namespace).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}}, metav1.CreateOptions{})
+	if apierrors.IsAlreadyExists(err) {
+		// If we're racing to init and someone else already created it, re-fetch
+		return coreClient.ServiceAccounts(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+	}
+	return sa, err
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go
new file mode 100644
index 00000000..a2c5ce68
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_simple.go
@@ -0,0 +1,49 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cache + +import ( + "time" + + utilcache "k8s.io/apimachinery/pkg/util/cache" + "k8s.io/utils/clock" +) + +type simpleCache struct { + cache *utilcache.Expiring +} + +func newSimpleCache(clock clock.Clock) cache { + return &simpleCache{cache: utilcache.NewExpiringWithClock(clock)} +} + +func (c *simpleCache) get(key string) (*cacheRecord, bool) { + record, ok := c.cache.Get(key) + if !ok { + return nil, false + } + value, ok := record.(*cacheRecord) + return value, ok +} + +func (c *simpleCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.cache.Set(key, value, ttl) +} + +func (c *simpleCache) remove(key string) { + c.cache.Delete(key) +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go new file mode 100644 index 00000000..e5b7afe4 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cache_striped.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "hash/fnv" + "time" +) + +// split cache lookups across N striped caches +type stripedCache struct { + stripeCount uint32 + hashFunc func(string) uint32 + caches []cache +} + +type hashFunc func(string) uint32 +type newCacheFunc func() cache + +func newStripedCache(stripeCount int, hash hashFunc, newCacheFunc newCacheFunc) cache { + caches := []cache{} + for i := 0; i < stripeCount; i++ { + caches = append(caches, newCacheFunc()) + } + return &stripedCache{ + stripeCount: uint32(stripeCount), + hashFunc: hash, + caches: caches, + } +} + +func (c *stripedCache) get(key string) (*cacheRecord, bool) { + return c.caches[c.hashFunc(key)%c.stripeCount].get(key) +} +func (c *stripedCache) set(key string, value *cacheRecord, ttl time.Duration) { + c.caches[c.hashFunc(key)%c.stripeCount].set(key, value, ttl) +} +func (c *stripedCache) remove(key string) { + c.caches[c.hashFunc(key)%c.stripeCount].remove(key) +} + +func fnvHashFunc(key string) uint32 { + f := fnv.New32() + f.Write([]byte(key)) + return f.Sum32() +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go new file mode 100644 index 00000000..18167ddd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/cached_token_authenticator.go @@ -0,0 +1,318 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"context"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/binary"
+	"errors"
+	"hash"
+	"io"
+	"runtime"
+	"sync"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sync/singleflight"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	auditinternal "k8s.io/apiserver/pkg/apis/audit"
+	"k8s.io/apiserver/pkg/audit"
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	"k8s.io/apiserver/pkg/warning"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
+)
+
+var errAuthnCrash = apierrors.NewInternalError(errors.New("authentication failed unexpectedly"))
+
+const sharedLookupTimeout = 30 * time.Second
+
+// cacheRecord holds the three return values of the authenticator.Token AuthenticateToken method
+type cacheRecord struct {
+	resp *authenticator.Response
+	ok   bool
+	err  error
+
+	// this cache assumes token authn has no side-effects or temporal dependence.
+	// neither of these is true for audit annotations set via AddAuditAnnotation.
+	//
+	// for audit annotations, the assumption is that for some period of time (cache TTL),
+	// all requests with the same API audiences and the same bearer token result in the
+	// same annotations. This may not be true if the authenticator sets an annotation
+	// based on the current time, but that may be okay since cache TTLs are generally
+	// small (seconds).
+	annotations map[string]string
+	warnings    []*cacheWarning
+}
+
+type cacheWarning struct {
+	agent string
+	text  string
+}
+
+type cachedTokenAuthenticator struct {
+	authenticator authenticator.Token
+
+	cacheErrs  bool
+	successTTL time.Duration
+	failureTTL time.Duration
+
+	cache cache
+	group singleflight.Group
+
+	// hashPool is a per-authenticator pool of hash.Hash (to avoid allocations from building the Hash)
+	// HMAC with SHA-256 and a random key is used to prevent precomputation and length extension attacks
+	// It also mitigates hash map DoS attacks via collisions (the inputs are supplied by untrusted users)
+	hashPool *sync.Pool
+}
+
+type cache interface {
+	// given a key, return the record, and whether or not it existed
+	get(key string) (value *cacheRecord, exists bool)
+	// caches the record for the key
+	set(key string, value *cacheRecord, ttl time.Duration)
+	// removes the record for the key
+	remove(key string)
+}
+
+// New returns a token authenticator that caches the results of the specified authenticator. A ttl of 0 bypasses the cache.
+func New(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration) authenticator.Token {
+	return newWithClock(authenticator, cacheErrs, successTTL, failureTTL, clock.RealClock{})
+}
+
+func newWithClock(authenticator authenticator.Token, cacheErrs bool, successTTL, failureTTL time.Duration, clock clock.Clock) authenticator.Token {
+	randomCacheKey := make([]byte, 32)
+	if _, err := rand.Read(randomCacheKey); err != nil {
+		panic(err) // rand should never fail
+	}
+
+	return &cachedTokenAuthenticator{
+		authenticator: authenticator,
+		cacheErrs:     cacheErrs,
+		successTTL:    successTTL,
+		failureTTL:    failureTTL,
+		// Cache performance degrades noticeably when the number of
+		// tokens in operation exceeds the size of the cache. It is
+		// cheap to make the cache big in the second dimension below,
+		// the memory is only consumed when that many tokens are being
+		// used. Currently we advertise support for 5k nodes and 10k
+		// namespaces; a 32k entry cache is therefore a 2x safety
+		// margin.
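+		//
+		// Stripe selection is a plain hash-mod (a sketch of what
+		// newStripedCache below does): stripe := fnvHashFunc(key) % 32.
+		// Each stripe is an independent expiring cache with its own
+		// internal locking, which reduces contention across tokens.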
+ cache: newStripedCache(32, fnvHashFunc, func() cache { return newSimpleCache(clock) }), + + hashPool: &sync.Pool{ + New: func() interface{} { + return hmac.New(sha256.New, randomCacheKey) + }, + }, + } +} + +// AuthenticateToken implements authenticator.Token +func (a *cachedTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) { + record := a.doAuthenticateToken(ctx, token) + if !record.ok || record.err != nil { + return nil, false, record.err + } + for key, value := range record.annotations { + audit.AddAuditAnnotation(ctx, key, value) + } + for _, w := range record.warnings { + warning.AddWarning(ctx, w.agent, w.text) + } + return record.resp, true, nil +} + +func (a *cachedTokenAuthenticator) doAuthenticateToken(ctx context.Context, token string) *cacheRecord { + doneAuthenticating := stats.authenticating(ctx) + + auds, audsOk := authenticator.AudiencesFrom(ctx) + + key := keyFunc(a.hashPool, auds, token) + if record, ok := a.cache.get(key); ok { + // Record cache hit + doneAuthenticating(true) + return record + } + + // Record cache miss + doneBlocking := stats.blocking(ctx) + defer doneBlocking() + defer doneAuthenticating(false) + + c := a.group.DoChan(key, func() (val interface{}, _ error) { + // always use one place to read and write the output of AuthenticateToken + record := &cacheRecord{} + + doneFetching := stats.fetching(ctx) + // We're leaving the request handling stack so we need to handle crashes + // ourselves. Log a stack trace and return a 500 if something panics. + defer func() { + if r := recover(); r != nil { + // make sure to always return a record + record.err = errAuthnCrash + val = record + + // Same as stdlib http server code. Manually allocate stack + // trace buffer size to prevent excessively large logs + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + klog.Errorf("%v\n%s", r, buf) + } + doneFetching(record.err == nil) + }() + + // Check again for a cached record. We may have raced with a fetch. + if record, ok := a.cache.get(key); ok { + return record, nil + } + + // Detach the context because the lookup may be shared by multiple callers, + // however propagate the audience. + ctx, cancel := context.WithTimeout(context.Background(), sharedLookupTimeout) + defer cancel() + + if audsOk { + ctx = authenticator.WithAudiences(ctx, auds) + } + recorder := &recorder{} + ctx = warning.WithWarningRecorder(ctx, recorder) + + ctx = audit.WithAuditContext(ctx) + ac := audit.AuditContextFrom(ctx) + // since this is shared work between multiple requests, we have no way of knowing if any + // particular request supports audit annotations. thus we always attempt to record them. + ac.Event.Level = auditinternal.LevelMetadata + + record.resp, record.ok, record.err = a.authenticator.AuthenticateToken(ctx, token) + record.annotations = ac.Event.Annotations + record.warnings = recorder.extractWarnings() + + if !a.cacheErrs && record.err != nil { + return record, nil + } + + switch { + case record.ok && a.successTTL > 0: + a.cache.set(key, record, a.successTTL) + case !record.ok && a.failureTTL > 0: + a.cache.set(key, record, a.failureTTL) + } + + return record, nil + }) + + select { + case result := <-c: + // we always set Val and never set Err + return result.Val.(*cacheRecord) + case <-ctx.Done(): + // fake a record on context cancel + return &cacheRecord{err: ctx.Err()} + } +} + +// keyFunc generates a string key by hashing the inputs. 
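+// Sketch of the key layout for a request with two audiences (each length is a
+// 4-byte big-endian prefix, mirroring the helpers below; illustrative only):
+//
+//	HMAC-SHA256( len(token) || token || 2 || len(aud0) || aud0 || len(aud1) || aud1 )
+//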
+// This lowers the memory requirement of the cache and keeps tokens out of memory. +func keyFunc(hashPool *sync.Pool, auds []string, token string) string { + h := hashPool.Get().(hash.Hash) + + h.Reset() + + // try to force stack allocation + var a [4]byte + b := a[:] + + writeLengthPrefixedString(h, b, token) + // encode the length of audiences to avoid ambiguities + writeLength(h, b, len(auds)) + for _, aud := range auds { + writeLengthPrefixedString(h, b, aud) + } + + key := toString(h.Sum(nil)) // skip base64 encoding to save an allocation + + hashPool.Put(h) + + return key +} + +// writeLengthPrefixedString writes s with a length prefix to prevent ambiguities, i.e. "xy" + "z" == "x" + "yz" +// the length of b is assumed to be 4 (b is mutated by this function to store the length of s) +func writeLengthPrefixedString(w io.Writer, b []byte, s string) { + writeLength(w, b, len(s)) + if _, err := w.Write(toBytes(s)); err != nil { + panic(err) // Write() on hash never fails + } +} + +// writeLength encodes length into b and then writes it via the given writer +// the length of b is assumed to be 4 +func writeLength(w io.Writer, b []byte, length int) { + binary.BigEndian.PutUint32(b, uint32(length)) + if _, err := w.Write(b); err != nil { + panic(err) // Write() on hash never fails + } +} + +// toBytes performs unholy acts to avoid allocations +func toBytes(s string) []byte { + // unsafe.StringData is unspecified for the empty string, so we provide a strict interpretation + if len(s) == 0 { + return nil + } + // Copied from go 1.20.1 os.File.WriteString + // https://github.com/golang/go/blob/202a1a57064127c3f19d96df57b9f9586145e21c/src/os/file.go#L246 + return unsafe.Slice(unsafe.StringData(s), len(s)) +} + +// toString performs unholy acts to avoid allocations +func toString(b []byte) string { + // unsafe.SliceData relies on cap whereas we want to rely on len + if len(b) == 0 { + return "" + } + // Copied from go 1.20.1 strings.Builder.String + // https://github.com/golang/go/blob/202a1a57064127c3f19d96df57b9f9586145e21c/src/strings/builder.go#L48 + return unsafe.String(unsafe.SliceData(b), len(b)) +} + +// simple recorder that only appends warning +type recorder struct { + mu sync.Mutex + warnings []*cacheWarning +} + +// AddWarning adds a warning to recorder. +func (r *recorder) AddWarning(agent, text string) { + r.mu.Lock() + defer r.mu.Unlock() + r.warnings = append(r.warnings, &cacheWarning{agent: agent, text: text}) +} + +func (r *recorder) extractWarnings() []*cacheWarning { + r.mu.Lock() + defer r.mu.Unlock() + warnings := r.warnings + r.warnings = nil + return warnings +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go new file mode 100644 index 00000000..d1b959aa --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/cache/stats.go @@ -0,0 +1,126 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "time" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +var ( + requestLatency = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "request_duration_seconds", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) + requestCount = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "request_total", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) + fetchCount = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "fetch_total", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) + activeFetchCount = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Namespace: "authentication", + Subsystem: "token_cache", + Name: "active_fetch_count", + StabilityLevel: metrics.ALPHA, + }, + []string{"status"}, + ) +) + +func init() { + legacyregistry.MustRegister( + requestLatency, + requestCount, + fetchCount, + activeFetchCount, + ) +} + +const ( + hitTag = "hit" + missTag = "miss" + + fetchFailedTag = "error" + fetchOkTag = "ok" + + fetchInFlightTag = "in_flight" + fetchBlockedTag = "blocked" +) + +type statsCollector struct{} + +var stats = statsCollector{} + +func (statsCollector) authenticating(ctx context.Context) func(hit bool) { + start := time.Now() + return func(hit bool) { + var tag string + if hit { + tag = hitTag + } else { + tag = missTag + } + + latency := time.Since(start) + + requestCount.WithContext(ctx).WithLabelValues(tag).Inc() + requestLatency.WithContext(ctx).WithLabelValues(tag).Observe(float64(latency.Milliseconds()) / 1000) + } +} + +func (statsCollector) blocking(ctx context.Context) func() { + activeFetchCount.WithContext(ctx).WithLabelValues(fetchBlockedTag).Inc() + return activeFetchCount.WithContext(ctx).WithLabelValues(fetchBlockedTag).Dec +} + +func (statsCollector) fetching(ctx context.Context) func(ok bool) { + activeFetchCount.WithContext(ctx).WithLabelValues(fetchInFlightTag).Inc() + return func(ok bool) { + var tag string + if ok { + tag = fetchOkTag + } else { + tag = fetchFailedTag + } + + fetchCount.WithContext(ctx).WithLabelValues(tag).Inc() + + activeFetchCount.WithContext(ctx).WithLabelValues(fetchInFlightTag).Dec() + } +} diff --git a/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go new file mode 100644 index 00000000..bd7fccbd --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go @@ -0,0 +1,99 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package tokenfile
+
+import (
+	"context"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"k8s.io/apiserver/pkg/authentication/authenticator"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/klog/v2"
+)
+
+type TokenAuthenticator struct {
+	tokens map[string]*user.DefaultInfo
+}
+
+// New returns a TokenAuthenticator for the given map from tokens to user info
+func New(tokens map[string]*user.DefaultInfo) *TokenAuthenticator {
+	return &TokenAuthenticator{
+		tokens: tokens,
+	}
+}
+
+// NewCSV returns a TokenAuthenticator, populated from a CSV file.
+// The CSV file must contain records in the format "token,username,useruid",
+// with an optional fourth column holding a comma-separated list of group names.
+func NewCSV(path string) (*TokenAuthenticator, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	recordNum := 0
+	tokens := make(map[string]*user.DefaultInfo)
+	reader := csv.NewReader(file)
+	reader.FieldsPerRecord = -1
+	for {
+		record, err := reader.Read()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if len(record) < 3 {
+			return nil, fmt.Errorf("token file '%s' must have at least 3 columns (token, user name, user uid), found %d", path, len(record))
+		}
+
+		recordNum++
+		if record[0] == "" {
+			klog.Warningf("empty token has been found in token file '%s', record number '%d'", path, recordNum)
+			continue
+		}
+
+		obj := &user.DefaultInfo{
+			Name: record[1],
+			UID:  record[2],
+		}
+		if _, exist := tokens[record[0]]; exist {
+			klog.Warningf("duplicate token has been found in token file '%s', record number '%d'", path, recordNum)
+		}
+		tokens[record[0]] = obj
+
+		if len(record) >= 4 {
+			obj.Groups = strings.Split(record[3], ",")
+		}
+	}
+
+	return &TokenAuthenticator{
+		tokens: tokens,
+	}, nil
+}
+
+func (a *TokenAuthenticator) AuthenticateToken(ctx context.Context, value string) (*authenticator.Response, bool, error) {
+	user, ok := a.tokens[value]
+	if !ok {
+		return nil, false, nil
+	}
+	return &authenticator.Response{User: user}, true, nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
new file mode 100644
index 00000000..3d87fd72
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package user contains utilities for dealing with simple user exchange in the auth
+// packages. The user.Info interface defines an interface for exchanging that info.
+package user // import "k8s.io/apiserver/pkg/authentication/user"
diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
new file mode 100644
index 00000000..4d6ec098
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user + +// Info describes a user that has been authenticated to the system. +type Info interface { + // GetName returns the name that uniquely identifies this user among all + // other active users. + GetName() string + // GetUID returns a unique value for a particular user that will change + // if the user is removed from the system and another user is added with + // the same name. + GetUID() string + // GetGroups returns the names of the groups the user is a member of + GetGroups() []string + + // GetExtra can contain any additional information that the authenticator + // thought was interesting. One example would be scopes on a token. + // Keys in this map should be namespaced to the authenticator or + // authenticator/authorizer pair making use of them. + // For instance: "example.org/foo" instead of "foo" + // This is a map[string][]string because it needs to be serializeable into + // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization + // delegation flows + // In order to faithfully round-trip through an impersonation flow, these keys + // MUST be lowercase. + GetExtra() map[string][]string +} + +// DefaultInfo provides a simple user information exchange object +// for components that implement the UserInfo interface. +type DefaultInfo struct { + Name string + UID string + Groups []string + Extra map[string][]string +} + +func (i *DefaultInfo) GetName() string { + return i.Name +} + +func (i *DefaultInfo) GetUID() string { + return i.UID +} + +func (i *DefaultInfo) GetGroups() []string { + return i.Groups +} + +func (i *DefaultInfo) GetExtra() map[string][]string { + return i.Extra +} + +// well-known user and group names +const ( + SystemPrivilegedGroup = "system:masters" + NodesGroup = "system:nodes" + MonitoringGroup = "system:monitoring" + AllUnauthenticated = "system:unauthenticated" + AllAuthenticated = "system:authenticated" + + Anonymous = "system:anonymous" + APIServerUser = "system:apiserver" + + // core kubernetes process identities + KubeProxy = "system:kube-proxy" + KubeControllerManager = "system:kube-controller-manager" + KubeScheduler = "system:kube-scheduler" +) diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go new file mode 100644 index 00000000..d39deb17 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go @@ -0,0 +1,184 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package authorizer
+
+import (
+	"context"
+	"net/http"
+
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apiserver/pkg/authentication/user"
+)
+
+// Attributes is an interface used by an Authorizer to get information about a request
+// that is used to make an authorization decision.
+type Attributes interface {
+	// GetUser returns the user.Info object to authorize
+	GetUser() user.Info
+
+	// GetVerb returns the kube verb associated with API requests (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy),
+	// or the lowercased HTTP verb associated with non-API requests (this includes get, put, post, patch, and delete)
+	GetVerb() string
+
+	// When IsReadOnly() == true, the request has no side effects, other than
+	// caching, logging, and other incidentals.
+	IsReadOnly() bool
+
+	// The namespace of the object, if a request is for a REST object.
+	GetNamespace() string
+
+	// The kind of object, if a request is for a REST object.
+	GetResource() string
+
+	// GetSubresource returns the subresource being requested, if present
+	GetSubresource() string
+
+	// GetName returns the name of the object as parsed off the request. This will not be present for all request types, but
+	// will be present for: get, update, delete
+	GetName() string
+
+	// The group of the resource, if a request is for a REST object.
+	GetAPIGroup() string
+
+	// GetAPIVersion returns the version of the group requested, if a request is for a REST object.
+	GetAPIVersion() string
+
+	// IsResourceRequest returns true for requests to API resources, like /api/v1/nodes,
+	// and false for non-resource endpoints like /api, /healthz
+	IsResourceRequest() bool
+
+	// GetPath returns the path of the request
+	GetPath() string
+
+	// GetFieldSelector is lazy, thread-safe, and stores the parsed result and error.
+	// It returns an error if the field selector cannot be parsed.
+	// The returned requirements must be treated as readonly and not modified.
+	GetFieldSelector() (fields.Requirements, error)
+
+	// GetLabelSelector is lazy, thread-safe, and stores the parsed result and error.
+	// It returns an error if the label selector cannot be parsed.
+	// The returned requirements must be treated as readonly and not modified.
+	GetLabelSelector() (labels.Requirements, error)
+}
+
+// Authorizer makes an authorization decision based on information gained by making
+// zero or more calls to methods of the Attributes interface. It returns a Decision
+// (allow, deny, or no opinion), an optional reason, and an error if the decision
+// could not be made.
+type Authorizer interface {
+	Authorize(ctx context.Context, a Attributes) (authorized Decision, reason string, err error)
+}
+
+type AuthorizerFunc func(ctx context.Context, a Attributes) (Decision, string, error)
+
+func (f AuthorizerFunc) Authorize(ctx context.Context, a Attributes) (Decision, string, error) {
+	return f(ctx, a)
+}
+
+// RuleResolver provides a mechanism for resolving the list of rules that apply to a given user within a namespace.
+type RuleResolver interface {
+	// RulesFor gets the list of cluster-wide rules, the list of rules in the specific
+	// namespace, whether the returned lists are incomplete, and any errors.
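+	//
+	// A returned resource rule might look like (an illustrative value, not
+	// produced by this change):
+	//
+	//	&DefaultResourceRuleInfo{Verbs: []string{"get", "list"}, APIGroups: []string{""}, Resources: []string{"pods"}}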
+	RulesFor(user user.Info, namespace string) ([]ResourceRuleInfo, []NonResourceRuleInfo, bool, error)
+}
+
+// RequestAttributesGetter provides a function that extracts Attributes from an http.Request
+type RequestAttributesGetter interface {
+	GetRequestAttributes(user.Info, *http.Request) Attributes
+}
+
+// AttributesRecord implements Attributes interface.
+type AttributesRecord struct {
+	User            user.Info
+	Verb            string
+	Namespace       string
+	APIGroup        string
+	APIVersion      string
+	Resource        string
+	Subresource     string
+	Name            string
+	ResourceRequest bool
+	Path            string
+
+	FieldSelectorRequirements fields.Requirements
+	FieldSelectorParsingErr   error
+	LabelSelectorRequirements labels.Requirements
+	LabelSelectorParsingErr   error
+}
+
+func (a AttributesRecord) GetUser() user.Info {
+	return a.User
+}
+
+func (a AttributesRecord) GetVerb() string {
+	return a.Verb
+}
+
+func (a AttributesRecord) IsReadOnly() bool {
+	return a.Verb == "get" || a.Verb == "list" || a.Verb == "watch"
+}
+
+func (a AttributesRecord) GetNamespace() string {
+	return a.Namespace
+}
+
+func (a AttributesRecord) GetResource() string {
+	return a.Resource
+}
+
+func (a AttributesRecord) GetSubresource() string {
+	return a.Subresource
+}
+
+func (a AttributesRecord) GetName() string {
+	return a.Name
+}
+
+func (a AttributesRecord) GetAPIGroup() string {
+	return a.APIGroup
+}
+
+func (a AttributesRecord) GetAPIVersion() string {
+	return a.APIVersion
+}
+
+func (a AttributesRecord) IsResourceRequest() bool {
+	return a.ResourceRequest
+}
+
+func (a AttributesRecord) GetPath() string {
+	return a.Path
+}
+
+func (a AttributesRecord) GetFieldSelector() (fields.Requirements, error) {
+	return a.FieldSelectorRequirements, a.FieldSelectorParsingErr
+}
+
+func (a AttributesRecord) GetLabelSelector() (labels.Requirements, error) {
+	return a.LabelSelectorRequirements, a.LabelSelectorParsingErr
+}
+
+type Decision int
+
+const (
+	// DecisionDeny means that an authorizer decided to deny the action.
+	DecisionDeny Decision = iota
+	// DecisionAllow means that an authorizer decided to allow the action.
+	DecisionAllow
+	// DecisionNoOpinion means that an authorizer has no opinion on whether
+	// to allow or deny an action.
+	DecisionNoOpinion
+)
diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go
new file mode 100644
index 00000000..8f7d9d9e
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizer/rule.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorizer
+
+type ResourceRuleInfo interface {
+	// GetVerbs returns a list of Kubernetes resource API verbs.
+	GetVerbs() []string
+	// GetAPIGroups returns the names of the API groups that contain the resources.
+	GetAPIGroups() []string
+	// GetResources returns a list of resources the rule applies to.
+	GetResources() []string
+	// GetResourceNames returns an allowlist of names that the rule applies to.
+	GetResourceNames() []string
+}
+
+// DefaultResourceRuleInfo holds information that describes a resource rule
+type DefaultResourceRuleInfo struct {
+	Verbs         []string
+	APIGroups     []string
+	Resources     []string
+	ResourceNames []string
+}
+
+func (i *DefaultResourceRuleInfo) GetVerbs() []string {
+	return i.Verbs
+}
+
+func (i *DefaultResourceRuleInfo) GetAPIGroups() []string {
+	return i.APIGroups
+}
+
+func (i *DefaultResourceRuleInfo) GetResources() []string {
+	return i.Resources
+}
+
+func (i *DefaultResourceRuleInfo) GetResourceNames() []string {
+	return i.ResourceNames
+}
+
+type NonResourceRuleInfo interface {
+	// GetVerbs returns a list of Kubernetes resource API verbs.
+	GetVerbs() []string
+	// GetNonResourceURLs returns a set of partial URLs that a user should have access to.
+	GetNonResourceURLs() []string
+}
+
+// DefaultNonResourceRuleInfo holds information that describes a rule for non-resource requests
+type DefaultNonResourceRuleInfo struct {
+	Verbs           []string
+	NonResourceURLs []string
+}
+
+func (i *DefaultNonResourceRuleInfo) GetVerbs() []string {
+	return i.Verbs
+}
+
+func (i *DefaultNonResourceRuleInfo) GetNonResourceURLs() []string {
+	return i.NonResourceURLs
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go
new file mode 100644
index 00000000..6fe3fa96
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go
@@ -0,0 +1,95 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorizerfactory
+
+import (
+	"context"
+	"errors"
+
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+// alwaysAllowAuthorizer is an implementation of authorizer.Authorizer
+// which always says yes to an authorization request.
+// It is useful in tests and when using Kubernetes in an open manner.
+type alwaysAllowAuthorizer struct{}
+
+func (alwaysAllowAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (authorized authorizer.Decision, reason string, err error) {
+	return authorizer.DecisionAllow, "", nil
+}
+
+func (alwaysAllowAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
+	return []authorizer.ResourceRuleInfo{
+			&authorizer.DefaultResourceRuleInfo{
+				Verbs:     []string{"*"},
+				APIGroups: []string{"*"},
+				Resources: []string{"*"},
+			},
+		}, []authorizer.NonResourceRuleInfo{
+			&authorizer.DefaultNonResourceRuleInfo{
+				Verbs:           []string{"*"},
+				NonResourceURLs: []string{"*"},
+			},
+		}, false, nil
+}
+
+func NewAlwaysAllowAuthorizer() *alwaysAllowAuthorizer {
+	return new(alwaysAllowAuthorizer)
+}
+
+// alwaysDenyAuthorizer is an implementation of authorizer.Authorizer
+// which always says no to an authorization request.
+// It is useful in unit tests to force an operation to be forbidden.
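+//
+// A sketch of typical test usage (ctx and attrs are assumed to exist):
+//
+//	authz := NewAlwaysDenyAuthorizer()
+//	decision, reason, _ := authz.Authorize(ctx, attrs)
+//	// decision == authorizer.DecisionNoOpinion, reason == "Everything is forbidden."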
+type alwaysDenyAuthorizer struct{}
+
+func (alwaysDenyAuthorizer) Authorize(ctx context.Context, a authorizer.Attributes) (decision authorizer.Decision, reason string, err error) {
+	return authorizer.DecisionNoOpinion, "Everything is forbidden.", nil
+}
+
+func (alwaysDenyAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) {
+	return []authorizer.ResourceRuleInfo{}, []authorizer.NonResourceRuleInfo{}, false, nil
+}
+
+func NewAlwaysDenyAuthorizer() *alwaysDenyAuthorizer {
+	return new(alwaysDenyAuthorizer)
+}
+
+type privilegedGroupAuthorizer struct {
+	groups []string
+}
+
+func (r *privilegedGroupAuthorizer) Authorize(ctx context.Context, attr authorizer.Attributes) (authorizer.Decision, string, error) {
+	if attr.GetUser() == nil {
+		return authorizer.DecisionNoOpinion, "Error", errors.New("no user on request")
+	}
+	for _, attrGroup := range attr.GetUser().GetGroups() {
+		for _, privGroup := range r.groups {
+			if privGroup == attrGroup {
+				return authorizer.DecisionAllow, "", nil
+			}
+		}
+	}
+	return authorizer.DecisionNoOpinion, "", nil
+}
+
+// NewPrivilegedGroups is for use in loopback scenarios
+func NewPrivilegedGroups(groups ...string) *privilegedGroupAuthorizer {
+	return &privilegedGroupAuthorizer{
+		groups: groups,
+	}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go
new file mode 100644
index 00000000..07018914
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorizerfactory
+
+import (
+	"errors"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+	"k8s.io/apiserver/plugin/pkg/authorizer/webhook"
+	authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1"
+)
+
+// DelegatingAuthorizerConfig is the minimal configuration needed to create an authorizer
+// built to delegate authorization to a kube API server
+type DelegatingAuthorizerConfig struct {
+	SubjectAccessReviewClient authorizationclient.AuthorizationV1Interface
+
+	// AllowCacheTTL is the length of time that a successful authorization response will be cached
+	AllowCacheTTL time.Duration
+
+	// DenyCacheTTL is the length of time that an unsuccessful authorization response will be cached.
+	// You generally want more responsive, "deny, try again" flows.
+	DenyCacheTTL time.Duration
+
+	// WebhookRetryBackoff specifies the backoff parameters for the authorization webhook retry logic.
+	// This allows us to configure the sleep time at each iteration and the maximum number of retries allowed
+	// before we fail the webhook call in order to limit the fan-out that ensues when the system is degraded.
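+	//
+	// For illustration, a conservative retry policy could look like this
+	// (the values are an example, not a mandated default):
+	//
+	//	&wait.Backoff{Duration: 500 * time.Millisecond, Factor: 1.5, Jitter: 0.2, Steps: 5}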
+	WebhookRetryBackoff *wait.Backoff
+}
+
+func (c DelegatingAuthorizerConfig) New() (authorizer.Authorizer, error) {
+	if c.WebhookRetryBackoff == nil {
+		return nil, errors.New("retry backoff parameters for delegating authorization webhook have not been specified")
+	}
+
+	return webhook.NewFromInterface(
+		c.SubjectAccessReviewClient,
+		c.AllowCacheTTL,
+		c.DenyCacheTTL,
+		*c.WebhookRetryBackoff,
+		authorizer.DecisionNoOpinion,
+		NewDelegatingAuthorizerMetrics(),
+	)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/metrics.go b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/metrics.go
new file mode 100644
index 00000000..3f72a25b
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/metrics.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package authorizerfactory
+
+import (
+	"context"
+	"sync"
+
+	celmetrics "k8s.io/apiserver/pkg/authorization/cel"
+	webhookmetrics "k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics"
+	compbasemetrics "k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+var registerMetrics sync.Once
+
+// RegisterMetrics registers authorizer metrics.
+func RegisterMetrics() {
+	registerMetrics.Do(func() {
+		legacyregistry.MustRegister(requestTotal)
+		legacyregistry.MustRegister(requestLatency)
+	})
+}
+
+var (
+	requestTotal = compbasemetrics.NewCounterVec(
+		&compbasemetrics.CounterOpts{
+			Name:           "apiserver_delegated_authz_request_total",
+			Help:           "Number of HTTP requests partitioned by status code.",
+			StabilityLevel: compbasemetrics.ALPHA,
+		},
+		[]string{"code"},
+	)
+
+	requestLatency = compbasemetrics.NewHistogramVec(
+		&compbasemetrics.HistogramOpts{
+			Name:           "apiserver_delegated_authz_request_duration_seconds",
+			Help:           "Request latency in seconds. Broken down by status code.",
+			Buckets:        []float64{0.25, 0.5, 0.7, 1, 1.5, 3, 5, 10},
+			StabilityLevel: compbasemetrics.ALPHA,
+		},
+		[]string{"code"},
+	)
+)
+
+var _ = webhookmetrics.AuthorizerMetrics(delegatingAuthorizerMetrics{})
+
+type delegatingAuthorizerMetrics struct {
+	// no-op for webhook metrics for now, delegating authorization reports original total/latency metrics
+	webhookmetrics.NoopWebhookMetrics
+	// no-op for matchCondition metrics for now, delegating authorization doesn't configure match conditions
+	celmetrics.NoopMatcherMetrics
+}
+
+func NewDelegatingAuthorizerMetrics() delegatingAuthorizerMetrics {
+	RegisterMetrics()
+	return delegatingAuthorizerMetrics{}
+}
+
+// RecordRequestTotal increments the total number of requests for the delegated authorization.
+func (delegatingAuthorizerMetrics) RecordRequestTotal(ctx context.Context, code string) {
+	requestTotal.WithContext(ctx).WithLabelValues(code).Add(1)
+}
+
+// RecordRequestLatency measures request latency in seconds for the delegated authorization. Broken down by status code.
+func (delegatingAuthorizerMetrics) RecordRequestLatency(ctx context.Context, code string, latency float64) { + requestLatency.WithContext(ctx).WithLabelValues(code).Observe(latency) +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go new file mode 100644 index 00000000..829ff91d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/compile.go @@ -0,0 +1,330 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + + "github.com/google/cel-go/cel" + celast "github.com/google/cel-go/common/ast" + "github.com/google/cel-go/common/operators" + "github.com/google/cel-go/common/types/ref" + + authorizationv1 "k8s.io/api/authorization/v1" + "k8s.io/apimachinery/pkg/util/version" + apiservercel "k8s.io/apiserver/pkg/cel" + "k8s.io/apiserver/pkg/cel/environment" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +const ( + subjectAccessReviewRequestVarName = "request" + + fieldSelectorVarName = "fieldSelector" + labelSelectorVarName = "labelSelector" +) + +// CompilationResult represents a compiled authorization cel expression. +type CompilationResult struct { + Program cel.Program + ExpressionAccessor ExpressionAccessor + + // These track if a given expression uses fieldSelector and labelSelector, + // so construction of data passed to the CEL expression can be optimized if those fields are unused. + UsesFieldSelector bool + UsesLabelSelector bool +} + +// EvaluationResult contains the minimal required fields and metadata of a cel evaluation +type EvaluationResult struct { + EvalResult ref.Val + ExpressionAccessor ExpressionAccessor +} + +// Compiler is an interface for compiling CEL expressions with the desired environment mode. +type Compiler interface { + CompileCELExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) +} + +type compiler struct { + envSet *environment.EnvSet +} + +// NewCompiler returns a new Compiler. 
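+//
+// A minimal usage sketch (construction of the base environment is elided;
+// the variable names here are assumptions, not part of this change):
+//
+//	c := NewCompiler(baseEnvSet) // baseEnvSet is a *environment.EnvSet built by the caller
+//	result, err := c.CompileCELExpression(&SubjectAccessReviewMatchCondition{
+//		Expression: "request.resourceAttributes.namespace == 'kube-system'",
+//	})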
+func NewCompiler(env *environment.EnvSet) Compiler { + return &compiler{ + envSet: mustBuildEnv(env), + } +} + +func (c compiler) CompileCELExpression(expressionAccessor ExpressionAccessor) (CompilationResult, error) { + resultError := func(errorString string, errType apiservercel.ErrorType) (CompilationResult, error) { + err := &apiservercel.Error{ + Type: errType, + Detail: errorString, + } + return CompilationResult{ + ExpressionAccessor: expressionAccessor, + }, err + } + env, err := c.envSet.Env(environment.StoredExpressions) + if err != nil { + return resultError(fmt.Sprintf("unexpected error loading CEL environment: %v", err), apiservercel.ErrorTypeInternal) + } + ast, issues := env.Compile(expressionAccessor.GetExpression()) + if issues != nil { + return resultError("compilation failed: "+issues.String(), apiservercel.ErrorTypeInvalid) + } + found := false + returnTypes := expressionAccessor.ReturnTypes() + for _, returnType := range returnTypes { + if ast.OutputType() == returnType { + found = true + break + } + } + if !found { + var reason string + if len(returnTypes) == 1 { + reason = fmt.Sprintf("must evaluate to %v but got %v", returnTypes[0].String(), ast.OutputType()) + } else { + reason = fmt.Sprintf("must evaluate to one of %v", returnTypes) + } + + return resultError(reason, apiservercel.ErrorTypeInvalid) + } + checkedExpr, err := cel.AstToCheckedExpr(ast) + if err != nil { + // should be impossible since env.Compile returned no issues + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) + } + celAST, err := celast.ToAST(checkedExpr) + if err != nil { + // should be impossible since env.Compile returned no issues + return resultError("unexpected compilation error: "+err.Error(), apiservercel.ErrorTypeInternal) + } + + var usesFieldSelector, usesLabelSelector bool + celast.PreOrderVisit(celast.NavigateAST(celAST), celast.NewExprVisitor(func(e celast.Expr) { + // we already know we use both, no need to inspect more + if usesFieldSelector && usesLabelSelector { + return + } + + var fieldName string + switch e.Kind() { + case celast.SelectKind: + // simple select (.fieldSelector / .labelSelector) + fieldName = e.AsSelect().FieldName() + case celast.CallKind: + // optional select (.?fieldSelector / .?labelSelector) + if e.AsCall().FunctionName() != operators.OptSelect { + return + } + args := e.AsCall().Args() + // args[0] is the receiver (what comes before the `.?`), args[1] is the field name being optionally selected (what comes after the `.?`) + if len(args) != 2 || args[1].Kind() != celast.LiteralKind || args[1].AsLiteral().Type() != cel.StringType { + return + } + fieldName, _ = args[1].AsLiteral().Value().(string) + } + + switch fieldName { + case fieldSelectorVarName: + usesFieldSelector = true + case labelSelectorVarName: + usesLabelSelector = true + } + })) + + prog, err := env.Program(ast) + if err != nil { + return resultError("program instantiation failed: "+err.Error(), apiservercel.ErrorTypeInternal) + } + return CompilationResult{ + Program: prog, + ExpressionAccessor: expressionAccessor, + UsesFieldSelector: usesFieldSelector, + UsesLabelSelector: usesLabelSelector, + }, nil +} + +func mustBuildEnv(baseEnv *environment.EnvSet) *environment.EnvSet { + field := func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField { + return apiservercel.NewDeclField(name, declType, required, nil, nil) + } + fields := func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField { + 
result := make(map[string]*apiservercel.DeclField, len(fields)) + for _, f := range fields { + result[f.Name] = f + } + return result + } + subjectAccessReviewSpecRequestType := buildRequestType(field, fields) + extended, err := baseEnv.Extend( + environment.VersionedOptions{ + // we record this as 1.0 since it was available in the + // first version that supported this feature + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: []cel.EnvOption{ + cel.Variable(subjectAccessReviewRequestVarName, subjectAccessReviewSpecRequestType.CelType()), + }, + DeclTypes: []*apiservercel.DeclType{ + subjectAccessReviewSpecRequestType, + }, + }, + ) + if err != nil { + panic(fmt.Sprintf("environment misconfigured: %v", err)) + } + + return extended +} + +// buildRequestType generates a DeclType for SubjectAccessReviewSpec. +// if attributes are added here, also add to convertObjectToUnstructured. +func buildRequestType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + resourceAttributesType := buildResourceAttributesType(field, fields) + nonResourceAttributesType := buildNonResourceAttributesType(field, fields) + return apiservercel.NewObjectType("kubernetes.SubjectAccessReviewSpec", fields( + field("resourceAttributes", resourceAttributesType, false), + field("nonResourceAttributes", nonResourceAttributesType, false), + field("user", apiservercel.StringType, false), + field("groups", apiservercel.NewListType(apiservercel.StringType, -1), false), + field("extra", apiservercel.NewMapType(apiservercel.StringType, apiservercel.NewListType(apiservercel.StringType, -1), -1), false), + field("uid", apiservercel.StringType, false), + )) +} + +// buildResourceAttributesType generates a DeclType for ResourceAttributes. +// if attributes are added here, also add to convertObjectToUnstructured. 
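+//
+// With these declarations in scope, a match condition can refer to resource
+// attributes, e.g. (an illustrative expression, not shipped by this change):
+//
+//	has(request.resourceAttributes) &&
+//	request.resourceAttributes.resource == 'pods' &&
+//	request.resourceAttributes.verb == 'get'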
+func buildResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + resourceAttributesFields := []*apiservercel.DeclField{ + field("namespace", apiservercel.StringType, false), + field("verb", apiservercel.StringType, false), + field("group", apiservercel.StringType, false), + field("version", apiservercel.StringType, false), + field("resource", apiservercel.StringType, false), + field("subresource", apiservercel.StringType, false), + field("name", apiservercel.StringType, false), + } + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + resourceAttributesFields = append(resourceAttributesFields, field("fieldSelector", buildFieldSelectorType(field, fields), false)) + resourceAttributesFields = append(resourceAttributesFields, field("labelSelector", buildLabelSelectorType(field, fields), false)) + } + return apiservercel.NewObjectType("kubernetes.ResourceAttributes", fields(resourceAttributesFields...)) +} + +func buildFieldSelectorType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.FieldSelectorAttributes", fields( + field("rawSelector", apiservercel.StringType, false), + field("requirements", apiservercel.NewListType(buildSelectorRequirementType(field, fields), -1), false), + )) +} + +func buildLabelSelectorType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.LabelSelectorAttributes", fields( + field("rawSelector", apiservercel.StringType, false), + field("requirements", apiservercel.NewListType(buildSelectorRequirementType(field, fields), -1), false), + )) +} + +func buildSelectorRequirementType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.SelectorRequirement", fields( + field("key", apiservercel.StringType, false), + field("operator", apiservercel.StringType, false), + field("values", apiservercel.NewListType(apiservercel.StringType, -1), false), + )) +} + +// buildNonResourceAttributesType generates a DeclType for NonResourceAttributes. +// if attributes are added here, also add to convertObjectToUnstructured. 
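+//
+// e.g. an illustrative match condition gating a non-resource endpoint such as
+// /metrics:
+//
+//	has(request.nonResourceAttributes) && request.nonResourceAttributes.path == '/metrics'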
+func buildNonResourceAttributesType(field func(name string, declType *apiservercel.DeclType, required bool) *apiservercel.DeclField, fields func(fields ...*apiservercel.DeclField) map[string]*apiservercel.DeclField) *apiservercel.DeclType { + return apiservercel.NewObjectType("kubernetes.NonResourceAttributes", fields( + field("path", apiservercel.StringType, false), + field("verb", apiservercel.StringType, false), + )) +} + +func convertObjectToUnstructured(obj *authorizationv1.SubjectAccessReviewSpec, includeFieldSelector, includeLabelSelector bool) map[string]interface{} { + // Construct version containing every SubjectAccessReview user and string attribute field, even omitempty ones, for evaluation by CEL + extra := obj.Extra + if extra == nil { + extra = map[string]authorizationv1.ExtraValue{} + } + ret := map[string]interface{}{ + "user": obj.User, + "groups": obj.Groups, + "uid": string(obj.UID), + "extra": extra, + } + if obj.ResourceAttributes != nil { + resourceAttributes := map[string]interface{}{ + "namespace": obj.ResourceAttributes.Namespace, + "verb": obj.ResourceAttributes.Verb, + "group": obj.ResourceAttributes.Group, + "version": obj.ResourceAttributes.Version, + "resource": obj.ResourceAttributes.Resource, + "subresource": obj.ResourceAttributes.Subresource, + "name": obj.ResourceAttributes.Name, + } + + if includeFieldSelector && obj.ResourceAttributes.FieldSelector != nil { + if len(obj.ResourceAttributes.FieldSelector.Requirements) > 0 { + requirements := make([]map[string]interface{}, 0, len(obj.ResourceAttributes.FieldSelector.Requirements)) + for _, r := range obj.ResourceAttributes.FieldSelector.Requirements { + requirements = append(requirements, map[string]interface{}{ + "key": r.Key, + "operator": r.Operator, + "values": r.Values, + }) + } + resourceAttributes[fieldSelectorVarName] = map[string]interface{}{"requirements": requirements} + } + if len(obj.ResourceAttributes.FieldSelector.RawSelector) > 0 { + resourceAttributes[fieldSelectorVarName] = map[string]interface{}{"rawSelector": obj.ResourceAttributes.FieldSelector.RawSelector} + } + } + + if includeLabelSelector && obj.ResourceAttributes.LabelSelector != nil { + if len(obj.ResourceAttributes.LabelSelector.Requirements) > 0 { + requirements := make([]map[string]interface{}, 0, len(obj.ResourceAttributes.LabelSelector.Requirements)) + for _, r := range obj.ResourceAttributes.LabelSelector.Requirements { + requirements = append(requirements, map[string]interface{}{ + "key": r.Key, + "operator": r.Operator, + "values": r.Values, + }) + } + resourceAttributes[labelSelectorVarName] = map[string]interface{}{"requirements": requirements} + } + if len(obj.ResourceAttributes.LabelSelector.RawSelector) > 0 { + resourceAttributes[labelSelectorVarName] = map[string]interface{}{"rawSelector": obj.ResourceAttributes.LabelSelector.RawSelector} + } + } + + ret["resourceAttributes"] = resourceAttributes + } + if obj.NonResourceAttributes != nil { + ret["nonResourceAttributes"] = map[string]string{ + "verb": obj.NonResourceAttributes.Verb, + "path": obj.NonResourceAttributes.Path, + } + } + return ret +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go new file mode 100644 index 00000000..82166830 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/interface.go @@ -0,0 +1,41 @@ +/* +Copyright 2023 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + celgo "github.com/google/cel-go/cel" +) + +type ExpressionAccessor interface { + GetExpression() string + ReturnTypes() []*celgo.Type +} + +var _ ExpressionAccessor = &SubjectAccessReviewMatchCondition{} + +// SubjectAccessReviewMatchCondition is a CEL expression that maps a SubjectAccessReview request to a list of values. +type SubjectAccessReviewMatchCondition struct { + Expression string +} + +func (v *SubjectAccessReviewMatchCondition) GetExpression() string { + return v.Expression +} + +func (v *SubjectAccessReviewMatchCondition) ReturnTypes() []*celgo.Type { + return []*celgo.Type{celgo.BoolType} +} diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go new file mode 100644 index 00000000..eac97100 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/matcher.go @@ -0,0 +1,91 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "context" + "fmt" + "time" + + celgo "github.com/google/cel-go/cel" + + authorizationv1 "k8s.io/api/authorization/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +type CELMatcher struct { + CompilationResults []CompilationResult + + // These track if any expressions use fieldSelector and labelSelector, + // so construction of data passed to the CEL expression can be optimized if those fields are unused. 
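+	//
+	// (Editorial sketch, not upstream commentary: a populated matcher is used as
+	//
+	//	m := &CELMatcher{CompilationResults: results}
+	//	matched, err := m.Eval(ctx, subjectAccessReview)
+	//
+	// where `results` are assumed to come from this package's compilation step;
+	// Eval below ANDs all conditions and returns early on the first false.)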
+	UsesLabelSelector bool
+	UsesFieldSelector bool
+
+	// These are optional fields which can be populated if metrics reporting is desired
+	Metrics        MatcherMetrics
+	AuthorizerType string
+	AuthorizerName string
+}
+
+// Eval evaluates the given SubjectAccessReview against all CEL matchCondition expressions
+func (c *CELMatcher) Eval(ctx context.Context, r *authorizationv1.SubjectAccessReview) (bool, error) {
+	var evalErrors []error
+
+	metrics := c.Metrics
+	if metrics == nil {
+		metrics = NoopMatcherMetrics{}
+	}
+	start := time.Now()
+	defer func() {
+		metrics.RecordAuthorizationMatchConditionEvaluation(ctx, c.AuthorizerType, c.AuthorizerName, time.Since(start))
+		if len(evalErrors) > 0 {
+			metrics.RecordAuthorizationMatchConditionEvaluationFailure(ctx, c.AuthorizerType, c.AuthorizerName)
+		}
+	}()
+
+	va := map[string]interface{}{
+		"request": convertObjectToUnstructured(&r.Spec, c.UsesFieldSelector, c.UsesLabelSelector),
+	}
+	for _, compilationResult := range c.CompilationResults {
+		evalResult, _, err := compilationResult.Program.ContextEval(ctx, va)
+		if err != nil {
+			evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' resulted in error: %w", compilationResult.ExpressionAccessor.GetExpression(), err))
+			continue
+		}
+		if evalResult.Type() != celgo.BoolType {
+			evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' eval result type should be bool but got %v", compilationResult.ExpressionAccessor.GetExpression(), evalResult.Type()))
+			continue
+		}
+		match, ok := evalResult.Value().(bool)
+		if !ok {
+			evalErrors = append(evalErrors, fmt.Errorf("cel evaluation error: expression '%v' eval result value should be bool but got %v", compilationResult.ExpressionAccessor.GetExpression(), evalResult.Value()))
+			continue
+		}
+		// If at least one matchCondition successfully evaluates to FALSE,
+		// return early
+		if !match {
+			metrics.RecordAuthorizationMatchConditionExclusion(ctx, c.AuthorizerType, c.AuthorizerName)
+			return false, nil
+		}
+	}
+	// if there is any error, return
+	if len(evalErrors) > 0 {
+		return false, utilerrors.NewAggregate(evalErrors)
+	}
+	// ALL matchConditions evaluated to TRUE without error
+	return true, nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/authorization/cel/metrics.go b/vendor/k8s.io/apiserver/pkg/authorization/cel/metrics.go
new file mode 100644
index 00000000..c9431a70
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/authorization/cel/metrics.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package cel + +import ( + "context" + "sync" + "time" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +// MatcherMetrics defines methods for reporting matchCondition metrics +type MatcherMetrics interface { + // RecordAuthorizationMatchConditionEvaluation records the total time taken to evaluate matchConditions for an Authorize() call to the given authorizer + RecordAuthorizationMatchConditionEvaluation(ctx context.Context, authorizerType, authorizerName string, elapsed time.Duration) + // RecordAuthorizationMatchConditionEvaluationFailure increments if any evaluation error was encountered evaluating matchConditions for an Authorize() call to the given authorizer + RecordAuthorizationMatchConditionEvaluationFailure(ctx context.Context, authorizerType, authorizerName string) + // RecordAuthorizationMatchConditionExclusion records increments when at least one matchCondition evaluates to false and excludes an Authorize() call to the given authorizer + RecordAuthorizationMatchConditionExclusion(ctx context.Context, authorizerType, authorizerName string) +} + +type NoopMatcherMetrics struct{} + +func (NoopMatcherMetrics) RecordAuthorizationMatchConditionEvaluation(ctx context.Context, authorizerType, authorizerName string, elapsed time.Duration) { +} +func (NoopMatcherMetrics) RecordAuthorizationMatchConditionEvaluationFailure(ctx context.Context, authorizerType, authorizerName string) { +} +func (NoopMatcherMetrics) RecordAuthorizationMatchConditionExclusion(ctx context.Context, authorizerType, authorizerName string) { +} + +type matcherMetrics struct{} + +func NewMatcherMetrics() MatcherMetrics { + RegisterMetrics() + return matcherMetrics{} +} + +const ( + namespace = "apiserver" + subsystem = "authorization" +) + +var ( + authorizationMatchConditionEvaluationErrorsTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "match_condition_evaluation_errors_total", + Help: "Total number of errors when an authorization webhook encounters a match condition error split by authorizer type and name.", + StabilityLevel: metrics.ALPHA, + }, + []string{"type", "name"}, + ) + authorizationMatchConditionExclusionsTotal = metrics.NewCounterVec( + &metrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "match_condition_exclusions_total", + Help: "Total number of exclusions when an authorization webhook is skipped because match conditions exclude it.", + StabilityLevel: metrics.ALPHA, + }, + []string{"type", "name"}, + ) + authorizationMatchConditionEvaluationSeconds = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "match_condition_evaluation_seconds", + Help: "Authorization match condition evaluation time in seconds, split by authorizer type and name.", + Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.1, 0.2, 0.25}, + StabilityLevel: metrics.ALPHA, + }, + []string{"type", "name"}, + ) +) + +var registerMetrics sync.Once + +func RegisterMetrics() { + registerMetrics.Do(func() { + legacyregistry.MustRegister(authorizationMatchConditionEvaluationErrorsTotal) + legacyregistry.MustRegister(authorizationMatchConditionExclusionsTotal) + legacyregistry.MustRegister(authorizationMatchConditionEvaluationSeconds) + }) +} + +func ResetMetricsForTest() { + authorizationMatchConditionEvaluationErrorsTotal.Reset() + authorizationMatchConditionExclusionsTotal.Reset() + authorizationMatchConditionEvaluationSeconds.Reset() +} + +func 
(matcherMetrics) RecordAuthorizationMatchConditionEvaluationFailure(ctx context.Context, authorizerType, authorizerName string) { + authorizationMatchConditionEvaluationErrorsTotal.WithContext(ctx).WithLabelValues(authorizerType, authorizerName).Inc() +} + +func (matcherMetrics) RecordAuthorizationMatchConditionExclusion(ctx context.Context, authorizerType, authorizerName string) { + authorizationMatchConditionExclusionsTotal.WithContext(ctx).WithLabelValues(authorizerType, authorizerName).Inc() +} + +func (matcherMetrics) RecordAuthorizationMatchConditionEvaluation(ctx context.Context, authorizerType, authorizerName string, elapsed time.Duration) { + elapsedSeconds := elapsed.Seconds() + authorizationMatchConditionEvaluationSeconds.WithContext(ctx).WithLabelValues(authorizerType, authorizerName).Observe(elapsedSeconds) +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/OWNERS b/vendor/k8s.io/apiserver/pkg/cel/OWNERS new file mode 100644 index 00000000..f550fc17 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Kubernetes CEL library authors and maintainers +approvers: + - jpbetz + - cici37 + - jiahuif +reviewers: + - jpbetz + - cici37 + - jiahuif diff --git a/vendor/k8s.io/apiserver/pkg/cel/cidr.go b/vendor/k8s.io/apiserver/pkg/cel/cidr.go new file mode 100644 index 00000000..8e97f63c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/cidr.go @@ -0,0 +1,87 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + "math" + "net/netip" + "reflect" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// CIDR provides a CEL representation of an network address. +type CIDR struct { + netip.Prefix +} + +var ( + CIDRType = cel.OpaqueType("net.CIDR") +) + +// ConvertToNative implements ref.Val.ConvertToNative. +func (d CIDR) ConvertToNative(typeDesc reflect.Type) (any, error) { + if reflect.TypeOf(d.Prefix).AssignableTo(typeDesc) { + return d.Prefix, nil + } + if reflect.TypeOf("").AssignableTo(typeDesc) { + return d.Prefix.String(), nil + } + return nil, fmt.Errorf("type conversion error from 'CIDR' to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (d CIDR) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case CIDRType: + return d + case types.TypeType: + return CIDRType + case types.StringType: + return types.String(d.Prefix.String()) + } + return types.NewErr("type conversion error from '%s' to '%s'", CIDRType, typeVal) +} + +// Equal implements ref.Val.Equal. +func (d CIDR) Equal(other ref.Val) ref.Val { + otherD, ok := other.(CIDR) + if !ok { + return types.ValOrErr(other, "no such overload") + } + + return types.Bool(d.Prefix == otherD.Prefix) +} + +// Type implements ref.Val.Type. +func (d CIDR) Type() ref.Type { + return CIDRType +} + +// Value implements ref.Val.Value. 
+func (d CIDR) Value() any { + return d.Prefix +} + +// Size returns the size of the CIDR prefix address in bytes. +// Used in the size estimation of the runtime cost. +func (d CIDR) Size() ref.Val { + return types.Int(int(math.Ceil(float64(d.Prefix.Bits()) / 8))) +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/base.go b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go new file mode 100644 index 00000000..563d34e1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/environment/base.go @@ -0,0 +1,241 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package environment + +import ( + "fmt" + "strconv" + "sync" + "sync/atomic" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker" + "github.com/google/cel-go/ext" + "github.com/google/cel-go/interpreter" + "golang.org/x/sync/singleflight" + + "k8s.io/apimachinery/pkg/util/version" + celconfig "k8s.io/apiserver/pkg/apis/cel" + "k8s.io/apiserver/pkg/cel/library" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + utilversion "k8s.io/apiserver/pkg/util/version" +) + +// DefaultCompatibilityVersion returns a default compatibility version for use with EnvSet +// that guarantees compatibility with CEL features/libraries/parameters understood by +// the api server min compatibility version +// +// This default will be set to no more than the current Kubernetes major.minor version. +// +// Note that a default version number less than n-1 the current Kubernetes major.minor version +// indicates a wider range of version compatibility than strictly required for rollback. +// A wide range of compatibility is desirable because it means that CEL expressions are portable +// across a wider range of Kubernetes versions. +// A default version number equal to the current Kubernetes major.minor version +// indicates fast forward CEL features that can be used when rollback is no longer needed. +func DefaultCompatibilityVersion() *version.Version { + effectiveVer := utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent) + if effectiveVer == nil { + effectiveVer = utilversion.DefaultKubeEffectiveVersion() + } + return effectiveVer.MinCompatibilityVersion() +} + +var baseOpts = append(baseOptsWithoutStrictCost, StrictCostOpt) + +var baseOptsWithoutStrictCost = []VersionedOptions{ + { + // CEL epoch was actually 1.23, but we artificially set it to 1.0 because these + // options should always be present. + IntroducedVersion: version.MajorMinor(1, 0), + EnvOptions: []cel.EnvOption{ + cel.HomogeneousAggregateLiterals(), + // Validate function declarations once during base env initialization, + // so they don't need to be evaluated each time a CEL rule is compiled. + // This is a relatively expensive operation. 
+			cel.EagerlyValidateDeclarations(true),
+			cel.DefaultUTCTimeZone(true),
+
+			library.URLs(),
+			library.Regex(),
+			library.Lists(),
+
+			// cel-go v0.17.7 changed the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7.
+			// Since it is a regression fix, we apply it uniformly to all code using v0.17.7.
+			cel.CostEstimatorOptions(checker.PresenceTestHasCost(false)),
+		},
+		ProgramOptions: []cel.ProgramOption{
+			cel.EvalOptions(cel.OptOptimize, cel.OptTrackCost),
+			cel.CostLimit(celconfig.PerCallLimit),
+
+			// cel-go v0.17.7 changed the cost of has() from 0 to 1, but also provided the CostEstimatorOptions option to preserve the old behavior, so we enabled it at the same time we bumped our cel version to v0.17.7.
+			// Since it is a regression fix, we apply it uniformly to all code using v0.17.7.
+			cel.CostTrackerOptions(interpreter.PresenceTestHasCost(false)),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 27),
+		EnvOptions: []cel.EnvOption{
+			library.Authz(),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 28),
+		EnvOptions: []cel.EnvOption{
+			cel.CrossTypeNumericComparisons(true),
+			cel.OptionalTypes(),
+			library.Quantity(),
+		},
+	},
+	// add the new validator in 1.29
+	{
+		IntroducedVersion: version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			cel.ASTValidators(
+				cel.ValidateDurationLiterals(),
+				cel.ValidateTimestampLiterals(),
+				cel.ValidateRegexLiterals(),
+				cel.ValidateHomogeneousAggregateLiterals(),
+			),
+		},
+	},
+	// String library
+	{
+		IntroducedVersion: version.MajorMinor(1, 0),
+		RemovedVersion:    version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			ext.Strings(ext.StringsVersion(0)),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			ext.Strings(ext.StringsVersion(2)),
+		},
+	},
+	// Set library
+	{
+		IntroducedVersion: version.MajorMinor(1, 29),
+		EnvOptions: []cel.EnvOption{
+			ext.Sets(),
+		},
+	},
+	{
+		IntroducedVersion: version.MajorMinor(1, 30),
+		EnvOptions: []cel.EnvOption{
+			library.IP(),
+			library.CIDR(),
+		},
+	},
+	// Format Library
+	{
+		IntroducedVersion: version.MajorMinor(1, 31),
+		EnvOptions: []cel.EnvOption{
+			library.Format(),
+		},
+	},
+	// Authz selectors
+	{
+		IntroducedVersion: version.MajorMinor(1, 31),
+		FeatureEnabled: func() bool {
+			enabled := utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors)
+			authzSelectorsLibraryInit.Do(func() {
+				// Record the first time feature enablement was checked for this library.
+				// This is checked from integration tests to ensure no cached cel envs
+				// are constructed before feature enablement is effectively set.
+				authzSelectorsLibraryEnabled.Store(enabled)
+				// Uncomment to debug where the first initialization is coming from if needed.
+				// debug.PrintStack()
+			})
+			return enabled
+		},
+		EnvOptions: []cel.EnvOption{
+			library.AuthzSelectors(),
+		},
+	},
+}
+
+var (
+	authzSelectorsLibraryInit    sync.Once
+	authzSelectorsLibraryEnabled atomic.Value
+)
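The final entry above is the template for feature-gated options: FeatureEnabled is consulted for the NewExpressions environment, while the version bounds still govern StoredExpressions. As a hedged sketch of a consumer-defined gated entry (the introduced version is hypothetical; the gate and library are the ones referenced above):

```go
// Hypothetical, for illustration only: a VersionedOptions entry gated on the
// AuthorizeWithSelectors feature gate, mirroring the vendored pattern above.
var exampleGatedOpt = VersionedOptions{
	IntroducedVersion: version.MajorMinor(1, 32), // hypothetical release
	FeatureEnabled: func() bool {
		return utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors)
	},
	EnvOptions: []cel.EnvOption{
		library.AuthzSelectors(),
	},
}
```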
+// AuthzSelectorsLibraryEnabled returns whether the AuthzSelectors library was enabled when it was constructed.
+// If it has not been constructed yet, this returns `false, false`.
+// This is solely for the benefit of the integration tests making sure feature gates get correctly parsed before AuthzSelector ever has to check for enablement.
+func AuthzSelectorsLibraryEnabled() (enabled, constructed bool) {
+	enabled, constructed = authzSelectorsLibraryEnabled.Load().(bool)
+	return
+}
+
+var StrictCostOpt = VersionedOptions{
+	// This is to configure the cost calculation for extended libraries
+	IntroducedVersion: version.MajorMinor(1, 0),
+	ProgramOptions: []cel.ProgramOption{
+		cel.CostTracking(&library.CostEstimator{}),
+	},
+}
+
+// MustBaseEnvSet returns the common CEL base environments for Kubernetes for Version, or panics
+// if the version is nil, or does not have major and minor components.
+//
+// The returned environment contains function libraries, language settings, optimizations and
+// runtime cost limits appropriate for CEL as it is used in Kubernetes.
+//
+// The returned environment contains no CEL variable definitions or custom type declarations and
+// should be extended to construct environments with the appropriate variable definitions,
+// type declarations and any other needed configuration.
+// strictCost is used to determine whether to enforce strict cost calculation for CEL expressions.
+func MustBaseEnvSet(ver *version.Version, strictCost bool) *EnvSet {
+	if ver == nil {
+		panic("version must be non-nil")
+	}
+	if len(ver.Components()) < 2 {
+		panic(fmt.Sprintf("version must contain a major and minor component, but got: %s", ver.String()))
+	}
+	key := strconv.FormatUint(uint64(ver.Major()), 10) + "." + strconv.FormatUint(uint64(ver.Minor()), 10)
+	var entry interface{}
+	if strictCost {
+		if entry, ok := baseEnvs.Load(key); ok {
+			return entry.(*EnvSet)
+		}
+		entry, _, _ = baseEnvsSingleflight.Do(key, func() (interface{}, error) {
+			entry := mustNewEnvSet(ver, baseOpts)
+			baseEnvs.Store(key, entry)
+			return entry, nil
+		})
+	} else {
+		if entry, ok := baseEnvsWithOption.Load(key); ok {
+			return entry.(*EnvSet)
+		}
+		entry, _, _ = baseEnvsWithOptionSingleflight.Do(key, func() (interface{}, error) {
+			entry := mustNewEnvSet(ver, baseOptsWithoutStrictCost)
+			baseEnvsWithOption.Store(key, entry)
+			return entry, nil
+		})
+	}
+
+	return entry.(*EnvSet)
+}
+
+var (
+	baseEnvs                       = sync.Map{}
+	baseEnvsWithOption             = sync.Map{}
+	baseEnvsSingleflight           = &singleflight.Group{}
+	baseEnvsWithOptionSingleflight = &singleflight.Group{}
+)
diff --git a/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go b/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
new file mode 100644
index 00000000..07b9e8f5
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/environment/environment.go
@@ -0,0 +1,298 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package environment
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/google/cel-go/cel"
+
+	"k8s.io/apimachinery/pkg/util/version"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Type defines the different types of CEL environments used in Kubernetes.
+// CEL environments are used to compile and evaluate CEL expressions.
+// Environments include:
+//   - Function libraries
+//   - Variables
+//   - Types (both core CEL types and Kubernetes types)
+//   - Other CEL environment and program options
+type Type string
+
+const (
+	// NewExpressions is used to validate new or modified expressions in
+	// requests that write expressions to API resources.
+	//
+	// This environment type is compatible with a specific Kubernetes
+	// major/minor version. To ensure safe rollback, this environment type
+	// may not include all the function libraries, variables, type declarations, and CEL
+	// language settings available in the StoredExpressions environment type.
+	//
+	// NewExpressions must be used to validate (parse, compile, type check)
+	// all new or modified CEL expressions before they are written to storage.
+	NewExpressions Type = "NewExpressions"
+
+	// StoredExpressions is used to compile and run CEL expressions that have been
+	// persisted to storage.
+	//
+	// This environment type is compatible with CEL expressions that have been
+	// persisted to storage by all known versions of Kubernetes. This is the most
+	// permissive environment available.
+	//
+	// StoredExpressions is appropriate for use with CEL expressions in
+	// configuration files.
+	StoredExpressions Type = "StoredExpressions"
+)
+
+// EnvSet manages the creation and extension of CEL environments. Each EnvSet contains
+// both a NewExpressions and a StoredExpressions environment. EnvSets are created
+// and extended using VersionedOptions so that the EnvSet can prepare environments according
+// to what options were introduced at which versions.
+//
+// Each EnvSet is given a compatibility version when it is created, and prepares the
+// NewExpressions environment to be compatible with that version. The EnvSet also
+// prepares StoredExpressions to be compatible with all known versions of Kubernetes.
+type EnvSet struct {
+	// compatibilityVersion is the version that all configuration in
+	// the NewExpressions environment is compatible with.
+	compatibilityVersion *version.Version
+
+	// newExpressions is an environment containing only configuration
+	// in this EnvSet that is enabled at this compatibilityVersion.
+	newExpressions *cel.Env
+
+	// storedExpressions is an environment containing the latest configuration
+	// in this EnvSet.
+	storedExpressions *cel.Env
+}
+
+func newEnvSet(compatibilityVersion *version.Version, opts []VersionedOptions) (*EnvSet, error) {
+	base, err := cel.NewEnv()
+	if err != nil {
+		return nil, err
+	}
+	baseSet := EnvSet{compatibilityVersion: compatibilityVersion, newExpressions: base, storedExpressions: base}
+	return baseSet.Extend(opts...)
+}
+
+func mustNewEnvSet(ver *version.Version, opts []VersionedOptions) *EnvSet {
+	envSet, err := newEnvSet(ver, opts)
+	if err != nil {
+		panic(fmt.Sprintf("Default environment misconfigured: %v", err))
+	}
+	return envSet
+}
+
+// NewExpressionsEnv returns the NewExpressions environment Type for this EnvSet.
+// See NewExpressions for details.
+func (e *EnvSet) NewExpressionsEnv() *cel.Env {
+	return e.newExpressions
+}
+
+// StoredExpressionsEnv returns the StoredExpressions environment Type for this EnvSet.
+// See StoredExpressions for details.
+func (e *EnvSet) StoredExpressionsEnv() *cel.Env {
+	return e.storedExpressions
+}
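A hedged usage sketch (variable names illustrative) connecting MustBaseEnvSet from base.go with the accessors above and the Env accessor just below: obtain the base set once, extend it once with local declarations, then compile new expressions against the version-limited environment and evaluate persisted ones against the permissive environment:

```go
envSet := MustBaseEnvSet(DefaultCompatibilityVersion(), true)
extended, err := envSet.Extend(VersionedOptions{
	IntroducedVersion: version.MajorMinor(1, 28), // illustrative
	EnvOptions:        []cel.EnvOption{cel.Variable("object", cel.DynType)},
})
if err != nil {
	panic(err)
}
newEnv, _ := extended.Env(NewExpressions)       // validate incoming expressions
storedEnv, _ := extended.Env(StoredExpressions) // run persisted expressions
_, _ = newEnv, storedEnv
```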
+// Env returns the CEL environment for the given Type.
+func (e *EnvSet) Env(envType Type) (*cel.Env, error) {
+	switch envType {
+	case NewExpressions:
+		return e.newExpressions, nil
+	case StoredExpressions:
+		return e.storedExpressions, nil
+	default:
+		return nil, fmt.Errorf("unsupported environment type: %v", envType)
+	}
+}
+
+// VersionedOptions provides a set of CEL configuration options as well as the version the
+// options were introduced and, optionally, the version the options were removed.
+type VersionedOptions struct {
+	// IntroducedVersion is the version at which these options were introduced.
+	// The NewExpressions environment will only include options introduced at or before the
+	// compatibility version of the EnvSet.
+	//
+	// For example, to configure a CEL environment with an "object" variable bound to a
+	// resource kind, first create a DeclType from the groupVersionKind of the resource and then
+	// populate a VersionedOptions with the variable and the type:
+	//
+	//	schema := schemaResolver.ResolveSchema(groupVersionKind)
+	//	objectType := apiservercel.SchemaDeclType(schema, true)
+	//	...
+	//	VersionedOptions{
+	//		IntroducedVersion: version.MajorMinor(1, 26),
+	//		DeclTypes: []*apiservercel.DeclType{ objectType },
+	//		EnvOptions: []cel.EnvOption{ cel.Variable("object", objectType.CelType()) },
+	//	},
+	//
+	// To create a DeclType from a CRD, use a structural schema. For example:
+	//
+	//	schema := structuralschema.NewStructural(crdJSONProps)
+	//	objectType := apiservercel.SchemaDeclType(schema, true)
+	//
+	// Required.
+	IntroducedVersion *version.Version
+	// RemovedVersion is the version at which these options were removed.
+	// The NewExpressions environment will not include options removed at or before the
+	// compatibility version of the EnvSet.
+	//
+	// All option removals must be backward compatible; the removal must either be paired
+	// with a compatible replacement introduced at the same version, or the removal must be non-breaking.
+	// The StoredExpressions environment will not include removed options.
+	//
+	// A function library may be upgraded by setting the RemovedVersion of the old library
+	// to the same value as the IntroducedVersion of the new library. The new library must
+	// be backward compatible with the old library.
+	//
+	// For example:
+	//
+	//	VersionedOptions{
+	//		IntroducedVersion: version.MajorMinor(1, 26), RemovedVersion: version.MajorMinor(1, 27),
+	//		EnvOptions: []cel.EnvOption{ libraries.Example(libraries.ExampleVersion(1)) },
+	//	},
+	//	VersionedOptions{
+	//		IntroducedVersion: version.MajorMinor(1, 27),
+	//		EnvOptions: []cel.EnvOption{ libraries.Example(libraries.ExampleVersion(2)) },
+	//	},
+	//
+	// Optional.
+	RemovedVersion *version.Version
+	// FeatureEnabled returns true if these options are enabled by feature gates,
+	// and returns false if these options are not enabled due to feature gates.
+	//
+	// This takes priority over IntroducedVersion / RemovedVersion for the NewExpressions environment.
+	//
+	// The StoredExpressions environment ignores this function.
+	//
+	// Optional.
+	FeatureEnabled func() bool
+	// EnvOptions provides CEL EnvOptions. This may be used to add a cel.Variable, a
+	// cel.Library, or to enable other CEL EnvOptions such as language settings.
+	//
+	// If an added cel.Variable has an OpenAPI type, the type must be included in DeclTypes.
+	EnvOptions []cel.EnvOption
+	// ProgramOptions provides CEL ProgramOptions. This may be used to set a cel.CostLimit,
+	// enable optimizations, and set other program level options that should be enabled
+	// for all programs using this environment.
+	ProgramOptions []cel.ProgramOption
+	// DeclTypes provides OpenAPI type declarations to register with the environment.
+	//
+	// If cel.Variables added to EnvOptions refer to an OpenAPI type, the type must be included in
+	// DeclTypes.
+	DeclTypes []*apiservercel.DeclType
+}
+
+// Extend returns an EnvSet based on this EnvSet but extended with the given VersionedOptions.
+// This EnvSet is not mutated.
+// The returned EnvSet has the same compatibility version as the EnvSet that was extended.
+//
+// Extend is an expensive operation and each call to Extend that adds DeclTypes increases
+// the depth of a chain of resolvers. For these reasons, calls to Extend should be kept
+// to a minimum.
+//
+// Some best practices:
+//
+//   - Minimize calls to Extend when handling API requests. Where possible, call Extend
+//     when initializing components.
+//   - If an EnvSet returned by Extend can be used to compile multiple CEL programs,
+//     call Extend once and reuse the returned EnvSet.
+//   - Prefer a single call to Extend with a full list of VersionedOptions over
+//     making multiple calls to Extend.
+func (e *EnvSet) Extend(options ...VersionedOptions) (*EnvSet, error) {
+	if len(options) > 0 {
+		newExprOpts, err := e.filterAndBuildOpts(e.newExpressions, e.compatibilityVersion, true, options)
+		if err != nil {
+			return nil, err
+		}
+		p, err := e.newExpressions.Extend(newExprOpts)
+		if err != nil {
+			return nil, err
+		}
+		storedExprOpt, err := e.filterAndBuildOpts(e.storedExpressions, version.MajorMinor(math.MaxUint, math.MaxUint), false, options)
+		if err != nil {
+			return nil, err
+		}
+		s, err := e.storedExpressions.Extend(storedExprOpt)
+		if err != nil {
+			return nil, err
+		}
+		return &EnvSet{compatibilityVersion: e.compatibilityVersion, newExpressions: p, storedExpressions: s}, nil
+	}
+	return e, nil
+}
+
+func (e *EnvSet) filterAndBuildOpts(base *cel.Env, compatVer *version.Version, honorFeatureGateEnablement bool, opts []VersionedOptions) (cel.EnvOption, error) {
+	var envOpts []cel.EnvOption
+	var progOpts []cel.ProgramOption
+	var declTypes []*apiservercel.DeclType
+
+	for _, opt := range opts {
+		var allowedByFeatureGate, allowedByVersion bool
+		if opt.FeatureEnabled != nil && honorFeatureGateEnablement {
+			// Feature-gate-enabled libraries must follow compatible default feature enablement.
+			// Enabling alpha features in their first release enables libraries the previous API server is unaware of.
+			allowedByFeatureGate = opt.FeatureEnabled()
+			if !allowedByFeatureGate {
+				continue
+			}
+		}
+		if compatVer.AtLeast(opt.IntroducedVersion) && (opt.RemovedVersion == nil || compatVer.LessThan(opt.RemovedVersion)) {
+			allowedByVersion = true
+		}
+
+		if allowedByFeatureGate || allowedByVersion {
+			envOpts = append(envOpts, opt.EnvOptions...)
+			progOpts = append(progOpts, opt.ProgramOptions...)
+			declTypes = append(declTypes, opt.DeclTypes...)
+		}
+	}
+
+	if len(declTypes) > 0 {
+		provider := apiservercel.NewDeclTypeProvider(declTypes...)
+		if compatVer.AtLeast(version.MajorMinor(1, 31)) {
+			provider.SetRecognizeKeywordAsFieldName(true)
+		}
+		providerOpts, err := provider.EnvOptions(base.CELTypeProvider())
+		if err != nil {
+			return nil, err
+		}
+		envOpts = append(envOpts, providerOpts...)
+ } + + combined := cel.Lib(&envLoader{ + envOpts: envOpts, + progOpts: progOpts, + }) + return combined, nil +} + +type envLoader struct { + envOpts []cel.EnvOption + progOpts []cel.ProgramOption +} + +func (e *envLoader) CompileOptions() []cel.EnvOption { + return e.envOpts +} + +func (e *envLoader) ProgramOptions() []cel.ProgramOption { + return e.progOpts +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/errors.go b/vendor/k8s.io/apiserver/pkg/cel/errors.go new file mode 100644 index 00000000..d7b052fc --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/errors.go @@ -0,0 +1,124 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + + "github.com/google/cel-go/cel" +) + +// ErrInternal the basic error that occurs when the expression fails to evaluate +// due to internal reasons. Any Error that has the Type of +// ErrorInternal is considered equal to ErrInternal +var ErrInternal = fmt.Errorf("internal") + +// ErrInvalid is the basic error that occurs when the expression fails to +// evaluate but not due to internal reasons. Any Error that has the Type of +// ErrorInvalid is considered equal to ErrInvalid. +var ErrInvalid = fmt.Errorf("invalid") + +// ErrRequired is the basic error that occurs when the expression is required +// but absent. +// Any Error that has the Type of ErrorRequired is considered equal +// to ErrRequired. +var ErrRequired = fmt.Errorf("required") + +// ErrCompilation is the basic error that occurs when the expression fails to +// compile. Any CompilationError wraps ErrCompilation. +// ErrCompilation wraps ErrInvalid +var ErrCompilation = fmt.Errorf("%w: compilation error", ErrInvalid) + +// ErrOutOfBudget is the basic error that occurs when the expression fails due to +// exceeding budget. +var ErrOutOfBudget = fmt.Errorf("out of budget") + +// Error is an implementation of the 'error' interface, which represents a +// XValidation error. +type Error struct { + Type ErrorType + Detail string + + // Cause is an optional wrapped errors that can be useful to + // programmatically retrieve detailed errors. + Cause error +} + +var _ error = &Error{} + +// Error implements the error interface. +func (v *Error) Error() string { + return v.Detail +} + +func (v *Error) Is(err error) bool { + switch v.Type { + case ErrorTypeRequired: + return err == ErrRequired + case ErrorTypeInvalid: + return err == ErrInvalid + case ErrorTypeInternal: + return err == ErrInternal + } + return false +} + +// Unwrap returns the wrapped Cause. +func (v *Error) Unwrap() error { + return v.Cause +} + +// ErrorType is a machine-readable value providing more detail about why +// a XValidation is invalid. +type ErrorType string + +const ( + // ErrorTypeRequired is used to report withNullable values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). 
+ ErrorTypeRequired ErrorType = "RuleRequired" + // ErrorTypeInvalid is used to report malformed values + ErrorTypeInvalid ErrorType = "RuleInvalid" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + ErrorTypeInternal ErrorType = "InternalError" +) + +// CompilationError indicates an error during expression compilation. +// It wraps ErrCompilation. +type CompilationError struct { + err *Error + Issues *cel.Issues +} + +// NewCompilationError wraps a cel.Issues to indicate a compilation failure. +func NewCompilationError(issues *cel.Issues) *CompilationError { + return &CompilationError{ + Issues: issues, + err: &Error{ + Type: ErrorTypeInvalid, + Detail: fmt.Sprintf("compilation error: %s", issues), + }} +} + +func (e *CompilationError) Error() string { + return e.err.Error() +} + +func (e *CompilationError) Unwrap() []error { + return []error{e.err, ErrCompilation} +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/escaping.go b/vendor/k8s.io/apiserver/pkg/cel/escaping.go new file mode 100644 index 00000000..705c353a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/escaping.go @@ -0,0 +1,170 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "regexp" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// celReservedSymbols is a list of RESERVED symbols defined in the CEL lexer. +// No identifiers are allowed to collide with these symbols. +// https://github.com/google/cel-spec/blob/master/doc/langdef.md#syntax +var celReservedSymbols = sets.NewString( + "true", "false", "null", "in", + "as", "break", "const", "continue", "else", + "for", "function", "if", "import", "let", + "loop", "package", "namespace", "return", // !! 'namespace' is used heavily in Kubernetes + "var", "void", "while", +) + +// expandMatcher matches the escape sequence, characters that are escaped, and characters that are unsupported +var expandMatcher = regexp.MustCompile(`(__|[-./]|[^a-zA-Z0-9-./_])`) + +// newCharacterFilter returns a boolean array to indicate the allowed characters +func newCharacterFilter(characters string) []bool { + maxChar := 0 + for _, c := range characters { + if maxChar < int(c) { + maxChar = int(c) + } + } + filter := make([]bool, maxChar+1) + + for _, c := range characters { + filter[int(c)] = true + } + + return filter +} + +type escapeCheck struct { + canSkipRegex bool + invalidCharFound bool +} + +// skipRegexCheck checks if escape would be skipped. +// if invalidCharFound is true, it must have invalid character; if invalidCharFound is false, not sure if it has invalid character or not +func skipRegexCheck(ident string) escapeCheck { + escapeCheck := escapeCheck{canSkipRegex: true, invalidCharFound: false} + // skip escape if possible + previous_underscore := false + for _, c := range ident { + if c == '/' || c == '-' || c == '.' 
{
+			escapeCheck.canSkipRegex = false
+			return escapeCheck
+		}
+		intc := int(c)
+		if intc < 0 || intc >= len(validCharacterFilter) || !validCharacterFilter[intc] {
+			escapeCheck.invalidCharFound = true
+			return escapeCheck
+		}
+		if c == '_' && previous_underscore {
+			escapeCheck.canSkipRegex = false
+			return escapeCheck
+		}
+
+		previous_underscore = c == '_'
+	}
+	return escapeCheck
+}
+
+// validCharacterFilter indicates the allowed characters.
+var validCharacterFilter = newCharacterFilter("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_")
+
+// Escape escapes ident and returns a CEL identifier (of the form '[a-zA-Z_][a-zA-Z0-9_]*'), or returns
+// false if the ident does not match the supported input format of `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*`.
+// Escaping Rules:
+//   - '__' escapes to '__underscores__'
+//   - '.' escapes to '__dot__'
+//   - '-' escapes to '__dash__'
+//   - '/' escapes to '__slash__'
+//   - Identifiers that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are: "true", "false",
+//     "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if", "import", "let", "loop", "package",
+//     "namespace", "return".
+func Escape(ident string) (string, bool) {
+	if len(ident) == 0 || ('0' <= ident[0] && ident[0] <= '9') {
+		return "", false
+	}
+	if celReservedSymbols.Has(ident) {
+		return "__" + ident + "__", true
+	}
+
+	escapeCheck := skipRegexCheck(ident)
+	if escapeCheck.invalidCharFound {
+		return "", false
+	}
+	if escapeCheck.canSkipRegex {
+		return ident, true
+	}
+
+	ok := true
+	ident = expandMatcher.ReplaceAllStringFunc(ident, func(s string) string {
+		switch s {
+		case "__":
+			return "__underscores__"
+		case ".":
+			return "__dot__"
+		case "-":
+			return "__dash__"
+		case "/":
+			return "__slash__"
+		default: // matched an unsupported character
+			ok = false
+			return ""
+		}
+	})
+	if !ok {
+		return "", false
+	}
+	return ident, true
+}
+
+var unexpandMatcher = regexp.MustCompile(`(_{2}[^_]+_{2})`)
+
+// Unescape unescapes a CEL identifier containing the escape sequences described in Escape, or returns false if the
+// string contains invalid escape sequences. The escaped input is expected to be a valid CEL identifier, but is
+// not checked.
+func Unescape(escaped string) (string, bool) {
+	ok := true
+	escaped = unexpandMatcher.ReplaceAllStringFunc(escaped, func(s string) string {
+		contents := s[2 : len(s)-2]
+		switch contents {
+		case "underscores":
+			return "__"
+		case "dot":
+			return "."
+		case "dash":
+			return "-"
+		case "slash":
+			return "/"
+		}
+		if celReservedSymbols.Has(contents) {
+			if len(s) != len(escaped) {
+				ok = false
+			}
+			return contents
+		}
+		ok = false
+		return ""
+	})
+	if !ok {
+		return "", false
+	}
+	return escaped, true
+}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/format.go b/vendor/k8s.io/apiserver/pkg/cel/format.go
new file mode 100644
index 00000000..1bcfddfe
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/format.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+var (
+	FormatObject = decls.NewObjectType("kubernetes.NamedFormat")
+	FormatType   = cel.ObjectType("kubernetes.NamedFormat")
+)
+
+// Format provides a CEL representation of a Kubernetes format
+type Format struct {
+	Name         string
+	ValidateFunc func(string) []string
+
+	// Size of the regex string or estimated equivalent regex string used
+	// for cost estimation
+	MaxRegexSize int
+}
+
+func (d *Format) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	return nil, fmt.Errorf("type conversion error from 'Format' to '%v'", typeDesc)
+}
+
+func (d *Format) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case FormatType:
+		return d
+	case types.TypeType:
+		return FormatType
+	default:
+		return types.NewErr("type conversion error from '%s' to '%s'", FormatType, typeVal)
+	}
+}
+
+func (d *Format) Equal(other ref.Val) ref.Val {
+	otherDur, ok := other.(*Format)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	return types.Bool(d.Name == otherDur.Name)
+}
+
+func (d *Format) Type() ref.Type {
+	return FormatType
+}
+
+func (d *Format) Value() interface{} {
+	return d
+}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/ip.go b/vendor/k8s.io/apiserver/pkg/cel/ip.go
new file mode 100644
index 00000000..f91c6cb7
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/ip.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"math"
+	"net/netip"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// IP provides a CEL representation of an IP address.
+type IP struct {
+	netip.Addr
+}
+
+var (
+	IPType = cel.OpaqueType("net.IP")
+)
+
+// ConvertToNative implements ref.Val.ConvertToNative.
+func (d IP) ConvertToNative(typeDesc reflect.Type) (any, error) {
+	if reflect.TypeOf(d.Addr).AssignableTo(typeDesc) {
+		return d.Addr, nil
+	}
+	if reflect.TypeOf("").AssignableTo(typeDesc) {
+		return d.Addr.String(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'IP' to '%v'", typeDesc)
+}
+
+// ConvertToType implements ref.Val.ConvertToType.
+func (d IP) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case IPType:
+		return d
+	case types.TypeType:
+		return IPType
+	case types.StringType:
+		return types.String(d.Addr.String())
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", IPType, typeVal)
+}
+
+// Equal implements ref.Val.Equal.
+func (d IP) Equal(other ref.Val) ref.Val {
+	otherD, ok := other.(IP)
+	if !ok {
+		return types.ValOrErr(other, "no such overload")
+	}
+	return types.Bool(d.Addr == otherD.Addr)
+}
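A small editorial sketch of the IP value type in use; it relies only on methods defined in this file (Size appears just below):

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/google/cel-go/common/types"
	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	ip := apiservercel.IP{Addr: netip.MustParseAddr("192.0.2.1")}
	// ConvertToType to a CEL string yields the textual address.
	fmt.Println(ip.ConvertToType(types.StringType)) // 192.0.2.1
	// Size reports the address width in bytes (4 for IPv4, 16 for IPv6).
	fmt.Println(ip.Size()) // 4
	// Equal compares the underlying netip.Addr values.
	fmt.Println(ip.Equal(apiservercel.IP{Addr: netip.MustParseAddr("192.0.2.1")})) // true
}
```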
+// Type implements ref.Val.Type.
+func (d IP) Type() ref.Type {
+	return IPType
+}
+
+// Value implements ref.Val.Value.
+func (d IP) Value() any {
+	return d.Addr
+}
+
+// Size returns the size of the IP address in bytes.
+// Used in the size estimation of the runtime cost.
+func (d IP) Size() ref.Val {
+	return types.Int(int(math.Ceil(float64(d.Addr.BitLen()) / 8)))
+}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/authz.go b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go
new file mode 100644
index 00000000..1fd489fc
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/authz.go
@@ -0,0 +1,769 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	genericfeatures "k8s.io/apiserver/pkg/features"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apiserver/pkg/authentication/serviceaccount"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+)
+
+// Authz provides a CEL function library extension for performing authorization checks.
+// Note that authorization checks are only supported for CEL expression fields in the API
+// where an 'authorizer' variable is provided to the CEL expression. See the
+// documentation of API fields where CEL expressions are used to learn if the 'authorizer'
+// variable is provided.
+//
+// path
+//
+// Returns a PathCheck configured to check authorization for a non-resource request
+// path (e.g. /healthz). If path is an empty string, an error is returned.
+// Note that the leading '/' is not required.
+//
+//	<Authorizer>.path(<path>)
+//
+// Examples:
+//
+//	authorizer.path('/healthz') // returns a PathCheck for the '/healthz' API path
+//	authorizer.path('') // results in "path must not be empty" error
+//	authorizer.path(' ') // results in "path must not be empty" error
+//
+// group
+//
+// Returns a GroupCheck configured to check authorization for the API resources for
+// a particular API group.
+// Note that authorization checks are only supported for CEL expression fields in the API
+// where an 'authorizer' variable is provided to the CEL expression. Check the
+// documentation of API fields where CEL expressions are used to learn if the 'authorizer'
+// variable is provided.
+//
+//	<Authorizer>.group(<group>)
+//
+// Examples:
+//
+//	authorizer.group('apps') // returns a GroupCheck for the 'apps' API group
+//	authorizer.group('') // returns a GroupCheck for the core API group
+//	authorizer.group('example.com') // returns a GroupCheck for the custom resources in the 'example.com' API group
+//
+// serviceAccount
+//
+// Returns an Authorizer configured to check authorization for the provided service account namespace and name.
+// If the name is not a valid DNS subdomain string (as defined by RFC 1123), an error is returned.
+// If the namespace is not a valid DNS label (as defined by RFC 1123), an error is returned.
+//
+//	<Authorizer>.serviceAccount(<namespace>, <name>)
+//
+// Examples:
+//
+//	authorizer.serviceAccount('default', 'myserviceaccount') // returns an Authorizer for the service account with namespace 'default' and name 'myserviceaccount'
+//	authorizer.serviceAccount('not@a#valid!namespace', 'validname') // returns an error
+//	authorizer.serviceAccount('valid.example.com', 'invalid@*name') // returns an error
+//
+// resource
+//
+// Returns a ResourceCheck configured to check authorization for a particular API resource.
+// Note that the provided resource string should be a lower case plural name of a Kubernetes API resource.
+//
+//	<GroupCheck>.resource(<resource>)
+//
+// Examples:
+//
+//	authorizer.group('apps').resource('deployments') // returns a ResourceCheck for the 'deployments' resources in the 'apps' group.
+//	authorizer.group('').resource('pods') // returns a ResourceCheck for the 'pods' resources in the core group.
+//	authorizer.group('apps').resource('') // results in "resource must not be empty" error
+//	authorizer.group('apps').resource(' ') // results in "resource must not be empty" error
+//
+// subresource
+//
+// Returns a ResourceCheck configured to check authorization for a particular subresource of an API resource.
+// If subresource is set to "", the subresource field of this ResourceCheck is considered unset.
+//
+//	<ResourceCheck>.subresource(<subresource>)
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').subresource('status') // returns a ResourceCheck for the 'status' subresource of 'pods'
+//	authorizer.group('apps').resource('deployments').subresource('scale') // returns a ResourceCheck for the 'scale' subresource of 'deployments'
+//	authorizer.group('example.com').resource('widgets').subresource('scale') // returns a ResourceCheck for the 'scale' subresource of the 'widgets' custom resource
+//	authorizer.group('example.com').resource('widgets').subresource('') // returns a ResourceCheck for the 'widgets' resource.
+//
+// namespace
+//
+// Returns a ResourceCheck configured to check authorization for a particular namespace.
+// For cluster scoped resources, namespace() does not need to be called; namespace defaults
+// to "", which is the correct namespace value to use to check cluster scoped resources.
+// If namespace is set to "", the ResourceCheck will check authorization for the cluster scope.
+//
+//	<ResourceCheck>.namespace(<namespace>)
+//
+// Examples:
+//
+//	authorizer.group('apps').resource('deployments').namespace('test') // returns a ResourceCheck for 'deployments' in the 'test' namespace
+//	authorizer.group('').resource('pods').namespace('default') // returns a ResourceCheck for 'pods' in the 'default' namespace
+//	authorizer.group('').resource('widgets').namespace('') // returns a ResourceCheck for 'widgets' in the cluster scope
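+//
+// (Editorial aside, not upstream documentation: the builder functions chain, so
+// a fully scoped check composes in one expression, for example
+//
+//	authorizer.group('apps').resource('deployments').namespace('test').name('backend').check('update').allowed()
+//
+// using the name, check, and allowed functions documented below.)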
+//
+// name
+//
+// Returns a ResourceCheck configured to check authorization for a particular resource name.
+// If name is set to "", the name field of this ResourceCheck is considered unset.
+//
+//	<ResourceCheck>.name(<name>)
+//
+// Examples:
+//
+//	authorizer.group('apps').resource('deployments').namespace('test').name('backend') // returns a ResourceCheck for the 'backend' 'deployments' resource in the 'test' namespace
+//	authorizer.group('apps').resource('deployments').namespace('test').name('') // returns a ResourceCheck for the 'deployments' resource in the 'test' namespace
+//
+// check
+//
+// For PathCheck, checks if the principal (user or service account) that sent the request is authorized for the HTTP request verb of the path.
+// For ResourceCheck, checks if the principal (user or service account) that sent the request is authorized for the API verb and the configured authorization checks of the ResourceCheck.
+// The check operation can be expensive, particularly in clusters using the webhook authorization mode.
+//
+//	<PathCheck>.check(<verb>)
+//	<ResourceCheck>.check(<verb>)
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create') // Checks if the principal (user or service account) is authorized to create pods in the 'default' namespace.
+//	authorizer.path('/healthz').check('get') // Checks if the principal (user or service account) is authorized to make HTTP GET requests to the /healthz API path.
+//
+// allowed
+//
+// Returns true if the authorizer's decision for the check is "allow". Note that if the authorizer's decision is
+// "no opinion", the 'allowed' function returns false.
+//
+//	<Decision>.allowed()
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create').allowed() // Returns true if the principal (user or service account) is allowed to create pods in the 'default' namespace.
+//	authorizer.path('/healthz').check('get').allowed() // Returns true if the principal (user or service account) is allowed to make HTTP GET requests to the /healthz API path.
+//
+// reason
+//
+// Returns a string reason for the authorization decision.
+//
+//	<Decision>.reason()
+//
+// Examples:
+//
+//	authorizer.path('/healthz').check('GET').reason()
+//
+// errored
+//
+// Returns true if the authorization check resulted in an error.
+//
+//	<Decision>.errored()
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create').errored() // Returns true if the authorization check resulted in an error
+//
+// error
+//
+// If the authorization check resulted in an error, returns the error. Otherwise, returns the empty string.
+//
+//	<Decision>.error()
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').namespace('default').check('create').error()
+//
+// fieldSelector
+//
+// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check.
+// If the field selector does not parse successfully, no field selector requirements are included in the authorization check.
+// Added in Kubernetes 1.31+, Authz library version 1.
+//
+//	<ResourceCheck>.fieldSelector(<fieldSelector>)
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()
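+//
+// (Editorial aside, not upstream documentation: fieldSelector and labelSelector
+// each return the modified ResourceCheck, so both can be applied to one check:
+//
+//	authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').labelSelector('app=example').check('list').allowed()
+//
+// This composition is inferred from the overload declarations later in this
+// file, which take and return ResourceCheckType.)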
+//
+//	<ResourceCheck>.labelSelector(<labelSelector>)
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed()
+func Authz() cel.EnvOption {
+	return cel.Lib(authzLib)
+}
+
+var authzLib = &authz{}
+
+type authz struct{}
+
+func (*authz) LibraryName() string {
+	return "k8s.authz"
+}
+
+var authzLibraryDecls = map[string][]cel.FunctionOpt{
+	"path": {
+		cel.MemberOverload("authorizer_path", []*cel.Type{AuthorizerType, cel.StringType}, PathCheckType,
+			cel.BinaryBinding(authorizerPath))},
+	"group": {
+		cel.MemberOverload("authorizer_group", []*cel.Type{AuthorizerType, cel.StringType}, GroupCheckType,
+			cel.BinaryBinding(authorizerGroup))},
+	"serviceAccount": {
+		cel.MemberOverload("authorizer_serviceaccount", []*cel.Type{AuthorizerType, cel.StringType, cel.StringType}, AuthorizerType,
+			cel.FunctionBinding(authorizerServiceAccount))},
+	"resource": {
+		cel.MemberOverload("groupcheck_resource", []*cel.Type{GroupCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(groupCheckResource))},
+	"subresource": {
+		cel.MemberOverload("resourcecheck_subresource", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckSubresource))},
+	"namespace": {
+		cel.MemberOverload("resourcecheck_namespace", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckNamespace))},
+	"name": {
+		cel.MemberOverload("resourcecheck_name", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckName))},
+	"check": {
+		cel.MemberOverload("pathcheck_check", []*cel.Type{PathCheckType, cel.StringType}, DecisionType,
+			cel.BinaryBinding(pathCheckCheck)),
+		cel.MemberOverload("resourcecheck_check", []*cel.Type{ResourceCheckType, cel.StringType}, DecisionType,
+			cel.BinaryBinding(resourceCheckCheck))},
+	"errored": {
+		cel.MemberOverload("decision_errored", []*cel.Type{DecisionType}, cel.BoolType,
+			cel.UnaryBinding(decisionErrored))},
+	"error": {
+		cel.MemberOverload("decision_error", []*cel.Type{DecisionType}, cel.StringType,
+			cel.UnaryBinding(decisionError))},
+	"allowed": {
+		cel.MemberOverload("decision_allowed", []*cel.Type{DecisionType}, cel.BoolType,
+			cel.UnaryBinding(decisionAllowed))},
+	"reason": {
+		cel.MemberOverload("decision_reason", []*cel.Type{DecisionType}, cel.StringType,
+			cel.UnaryBinding(decisionReason))},
+}
+
+func (*authz) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(authzLibraryDecls))
+	for name, overloads := range authzLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*authz) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
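+
+// The following is an editorial sketch, not upstream code: one plausible way to
+// wire the Authz library into a CEL environment. The userInfo (user.Info) and
+// delegate (authorizer.Authorizer) values are assumed to be supplied by the
+// caller; the `authorizer` variable name mirrors the examples above.
+//
+//	env, err := cel.NewEnv(
+//		cel.Variable("authorizer", AuthorizerType),
+//		Authz(),
+//	)
+//	ast, iss := env.Compile(`authorizer.path('/healthz').check('get').allowed()`)
+//	if err != nil || iss.Err() != nil { /* handle */ }
+//	prg, _ := env.Program(ast)
+//	out, _, _ := prg.Eval(map[string]any{
+//		"authorizer": NewAuthorizerVal(userInfo, delegate),
+//	})
+//	// out is types.Bool(true) when the delegate authorizer allows the request.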
+
+// AuthzSelectors provides a CEL function library extension for adding fieldSelector and
+// labelSelector filters to authorization checks. This requires the Authz library.
+// See documentation of the Authz library for use and availability of the authorizer variable.
+//
+// fieldSelector
+//
+// Takes a string field selector, parses it to field selector requirements, and includes it in the authorization check.
+// If the field selector does not parse successfully, no field selector requirements are included in the authorization check.
+// Added in Kubernetes 1.31+.
+//
+//	<ResourceCheck>.fieldSelector(<fieldSelector>)
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').fieldSelector('spec.nodeName=mynode').check('list').allowed()
+//
+// labelSelector
+//
+// Takes a string label selector, parses it to label selector requirements, and includes it in the authorization check.
+// If the label selector does not parse successfully, no label selector requirements are included in the authorization check.
+// Added in Kubernetes 1.31+.
+//
+//	<ResourceCheck>.labelSelector(<labelSelector>)
+//
+// Examples:
+//
+//	authorizer.group('').resource('pods').labelSelector('app=example').check('list').allowed()
+func AuthzSelectors() cel.EnvOption {
+	return cel.Lib(authzSelectorsLib)
+}
+
+var authzSelectorsLib = &authzSelectors{}
+
+type authzSelectors struct{}
+
+func (*authzSelectors) LibraryName() string {
+	return "k8s.authzSelectors"
+}
+
+var authzSelectorsLibraryDecls = map[string][]cel.FunctionOpt{
+	"fieldSelector": {
+		cel.MemberOverload("authorizer_fieldselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckFieldSelector))},
+	"labelSelector": {
+		cel.MemberOverload("authorizer_labelselector", []*cel.Type{ResourceCheckType, cel.StringType}, ResourceCheckType,
+			cel.BinaryBinding(resourceCheckLabelSelector))},
+}
+
+func (*authzSelectors) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(authzSelectorsLibraryDecls))
+	for name, overloads := range authzSelectorsLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*authzSelectors) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func authorizerPath(arg1, arg2 ref.Val) ref.Val {
+	authz, ok := arg1.(authorizerVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	path, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	if len(strings.TrimSpace(path)) == 0 {
+		return types.NewErr("path must not be empty")
+	}
+
+	return authz.pathCheck(path)
+}
+
+func authorizerGroup(arg1, arg2 ref.Val) ref.Val {
+	authz, ok := arg1.(authorizerVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	group, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	return authz.groupCheck(group)
+}
+
+func authorizerServiceAccount(args ...ref.Val) ref.Val {
+	argn := len(args)
+	if argn != 3 {
+		return types.NoSuchOverloadErr()
+	}
+
+	authz, ok := args[0].(authorizerVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[0])
+	}
+
+	namespace, ok := args[1].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[1])
+	}
+
+	name, ok := args[2].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[2])
+	}
+
+	if errors := apimachineryvalidation.ValidateServiceAccountName(name, false); len(errors) > 0 {
+		return types.NewErr("Invalid service account name")
+	}
+	if errors := apimachineryvalidation.ValidateNamespaceName(namespace, false); len(errors) > 0 {
+		return types.NewErr("Invalid service account namespace")
+	}
+	return authz.serviceAccount(namespace, name)
+}
+
+func groupCheckResource(arg1, arg2 ref.Val) ref.Val {
+	groupCheck, ok := arg1.(groupCheckVal)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	resource, ok := arg2.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg1)
+	}
+
+	if len(strings.TrimSpace(resource)) == 0 {
+		return types.NewErr("resource must not be empty")
+	}
+	return groupCheck.resourceCheck(resource)
+}
+
+func 
resourceCheckSubresource(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + subresource, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.subresource = subresource + return result +} + +func resourceCheckFieldSelector(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + fieldSelector, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.fieldSelector = fieldSelector + return result +} + +func resourceCheckLabelSelector(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + labelSelector, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.labelSelector = labelSelector + return result +} + +func resourceCheckNamespace(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + namespace, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.namespace = namespace + return result +} + +func resourceCheckName(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + name, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + result := resourceCheck + result.name = name + return result +} + +func pathCheckCheck(arg1, arg2 ref.Val) ref.Val { + pathCheck, ok := arg1.(pathCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + httpRequestVerb, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + return pathCheck.Authorize(context.TODO(), httpRequestVerb) +} + +func resourceCheckCheck(arg1, arg2 ref.Val) ref.Val { + resourceCheck, ok := arg1.(resourceCheckVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + apiVerb, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + return resourceCheck.Authorize(context.TODO(), apiVerb) +} + +func decisionErrored(arg ref.Val) ref.Val { + decision, ok := arg.(decisionVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Bool(decision.err != nil) +} + +func decisionError(arg ref.Val) ref.Val { + decision, ok := arg.(decisionVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + if decision.err == nil { + return types.String("") + } + return types.String(decision.err.Error()) +} + +func decisionAllowed(arg ref.Val) ref.Val { + decision, ok := arg.(decisionVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.Bool(decision.authDecision == authorizer.DecisionAllow) +} + +func decisionReason(arg ref.Val) ref.Val { + decision, ok := arg.(decisionVal) + if !ok { + return types.MaybeNoSuchOverloadErr(arg) + } + + return types.String(decision.reason) +} + +var ( + AuthorizerType = cel.ObjectType("kubernetes.authorization.Authorizer") + PathCheckType = cel.ObjectType("kubernetes.authorization.PathCheck") + GroupCheckType = cel.ObjectType("kubernetes.authorization.GroupCheck") + ResourceCheckType = cel.ObjectType("kubernetes.authorization.ResourceCheck") + DecisionType = 
cel.ObjectType("kubernetes.authorization.Decision") +) + +// Resource represents an API resource +type Resource interface { + // GetName returns the name of the object as presented in the request. On a CREATE operation, the client + // may omit name and rely on the server to generate the name. If that is the case, this method will return + // the empty string + GetName() string + // GetNamespace is the namespace associated with the request (if any) + GetNamespace() string + // GetResource is the name of the resource being requested. This is not the kind. For example: pods + GetResource() schema.GroupVersionResource + // GetSubresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. + // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" + // (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding". + GetSubresource() string +} + +func NewAuthorizerVal(userInfo user.Info, authorizer authorizer.Authorizer) ref.Val { + return authorizerVal{receiverOnlyObjectVal: receiverOnlyVal(AuthorizerType), userInfo: userInfo, authAuthorizer: authorizer} +} + +func NewResourceAuthorizerVal(userInfo user.Info, authorizer authorizer.Authorizer, requestResource Resource) ref.Val { + a := authorizerVal{receiverOnlyObjectVal: receiverOnlyVal(AuthorizerType), userInfo: userInfo, authAuthorizer: authorizer} + resource := requestResource.GetResource() + g := a.groupCheck(resource.Group) + r := g.resourceCheck(resource.Resource) + r.subresource = requestResource.GetSubresource() + r.namespace = requestResource.GetNamespace() + r.name = requestResource.GetName() + return r +} + +type authorizerVal struct { + receiverOnlyObjectVal + userInfo user.Info + authAuthorizer authorizer.Authorizer +} + +func (a authorizerVal) pathCheck(path string) pathCheckVal { + return pathCheckVal{receiverOnlyObjectVal: receiverOnlyVal(PathCheckType), authorizer: a, path: path} +} + +func (a authorizerVal) groupCheck(group string) groupCheckVal { + return groupCheckVal{receiverOnlyObjectVal: receiverOnlyVal(GroupCheckType), authorizer: a, group: group} +} + +func (a authorizerVal) serviceAccount(namespace, name string) authorizerVal { + sa := &serviceaccount.ServiceAccountInfo{Name: name, Namespace: namespace} + return authorizerVal{ + receiverOnlyObjectVal: receiverOnlyVal(AuthorizerType), + userInfo: sa.UserInfo(), + authAuthorizer: a.authAuthorizer, + } +} + +type pathCheckVal struct { + receiverOnlyObjectVal + authorizer authorizerVal + path string +} + +func (a pathCheckVal) Authorize(ctx context.Context, verb string) ref.Val { + attr := &authorizer.AttributesRecord{ + Path: a.path, + Verb: verb, + User: a.authorizer.userInfo, + } + + decision, reason, err := a.authorizer.authAuthorizer.Authorize(ctx, attr) + return newDecision(decision, err, reason) +} + +type groupCheckVal struct { + receiverOnlyObjectVal + authorizer authorizerVal + group string +} + +func (g groupCheckVal) resourceCheck(resource string) resourceCheckVal { + return resourceCheckVal{receiverOnlyObjectVal: receiverOnlyVal(ResourceCheckType), groupCheck: g, resource: resource} +} + +type resourceCheckVal struct { + receiverOnlyObjectVal + groupCheck groupCheckVal + resource string + subresource string + namespace string + name string + fieldSelector string + 
labelSelector string +} + +func (a resourceCheckVal) Authorize(ctx context.Context, verb string) ref.Val { + attr := &authorizer.AttributesRecord{ + ResourceRequest: true, + APIGroup: a.groupCheck.group, + APIVersion: "*", + Resource: a.resource, + Subresource: a.subresource, + Namespace: a.namespace, + Name: a.name, + Verb: verb, + User: a.groupCheck.authorizer.userInfo, + } + + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + if len(a.fieldSelector) > 0 { + selector, err := fields.ParseSelector(a.fieldSelector) + if err != nil { + attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = nil, err + } else { + attr.FieldSelectorRequirements, attr.FieldSelectorParsingErr = selector.Requirements(), nil + } + } + if len(a.labelSelector) > 0 { + requirements, err := labels.ParseToRequirements(a.labelSelector) + if err != nil { + attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = nil, err + } else { + attr.LabelSelectorRequirements, attr.LabelSelectorParsingErr = requirements, nil + } + } + } + + decision, reason, err := a.groupCheck.authorizer.authAuthorizer.Authorize(ctx, attr) + return newDecision(decision, err, reason) +} + +func newDecision(authDecision authorizer.Decision, err error, reason string) decisionVal { + return decisionVal{receiverOnlyObjectVal: receiverOnlyVal(DecisionType), authDecision: authDecision, err: err, reason: reason} +} + +type decisionVal struct { + receiverOnlyObjectVal + err error + authDecision authorizer.Decision + reason string +} + +// receiverOnlyObjectVal provides an implementation of ref.Val for +// any object type that has receiver functions but does not expose any fields to +// CEL. +type receiverOnlyObjectVal struct { + typeValue *types.Type +} + +// receiverOnlyVal returns a receiverOnlyObjectVal for the given type. +func receiverOnlyVal(objectType *cel.Type) receiverOnlyObjectVal { + return receiverOnlyObjectVal{typeValue: types.NewTypeValue(objectType.String())} +} + +// ConvertToNative implements ref.Val.ConvertToNative. +func (a receiverOnlyObjectVal) ConvertToNative(typeDesc reflect.Type) (any, error) { + return nil, fmt.Errorf("type conversion error from '%s' to '%v'", a.typeValue.String(), typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (a receiverOnlyObjectVal) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case a.typeValue: + return a + case types.TypeType: + return a.typeValue + } + return types.NewErr("type conversion error from '%s' to '%s'", a.typeValue, typeVal) +} + +// Equal implements ref.Val.Equal. +func (a receiverOnlyObjectVal) Equal(other ref.Val) ref.Val { + o, ok := other.(receiverOnlyObjectVal) + if !ok { + return types.MaybeNoSuchOverloadErr(other) + } + return types.Bool(a == o) +} + +// Type implements ref.Val.Type. +func (a receiverOnlyObjectVal) Type() ref.Type { + return a.typeValue +} + +// Value implements ref.Val.Value. +func (a receiverOnlyObjectVal) Value() any { + return types.NoSuchOverloadErr() +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go b/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go new file mode 100644 index 00000000..c4259dae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/library/cidr.go @@ -0,0 +1,287 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"net/netip"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// CIDR provides a CEL function library extension of CIDR notation parsing functions.
+//
+// cidr
+//
+// Converts a string in CIDR notation to a network address representation or results in an error if the string is not a valid CIDR notation.
+// The CIDR must be an IPv4 or IPv6 subnet address with a mask.
+// Leading zeros in IPv4 address octets are not allowed.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4/24) are not allowed.
+//
+//	cidr(<string>)
+//
+// Examples:
+//
+//	cidr('192.168.0.0/16') // returns an IPv4 address with a CIDR mask
+//	cidr('::1/128') // returns an IPv6 address with a CIDR mask
+//	cidr('192.168.0.0/33') // error
+//	cidr('::1/129') // error
+//	cidr('192.168.0.1/16') // error, because there are non-0 bits after the prefix
+//
+// isCIDR
+//
+// Returns true if a string is a valid CIDR notation representation of a subnet with mask.
+// The CIDR must be an IPv4 or IPv6 subnet address with a mask.
+// Leading zeros in IPv4 address octets are not allowed.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4/24) are not allowed.
+//
+//	isCIDR(<string>)
+//
+// Examples:
+//
+//	isCIDR('192.168.0.0/16') // returns true
+//	isCIDR('::1/128') // returns true
+//	isCIDR('192.168.0.0/33') // returns false
+//	isCIDR('::1/129') // returns false
+//
+// containsIP / containsCIDR / ip / masked / prefixLength
+//
+//   - containsIP: Returns true if the CIDR contains the given IP address.
+//     The IP address must be an IPv4 or IPv6 address.
+//     May take either a string or IP address as an argument.
+//
+//   - containsCIDR: Returns true if the CIDR contains the given CIDR.
+//     The CIDR must be an IPv4 or IPv6 subnet address with a mask.
+//     May take either a string or CIDR as an argument.
+//
+//   - ip: Returns the IP address representation of the CIDR.
+//
+//   - masked: Returns the CIDR representation of the network address with a masked prefix.
+//     This can be used to return the canonical form of the CIDR network.
+//
+//   - prefixLength: Returns the prefix length of the CIDR in bits.
+//     This is the number of bits in the mask.
+// +// Examples: +// +// cidr('192.168.0.0/24').containsIP(ip('192.168.0.1')) // returns true +// cidr('192.168.0.0/24').containsIP(ip('192.168.1.1')) // returns false +// cidr('192.168.0.0/24').containsIP('192.168.0.1') // returns true +// cidr('192.168.0.0/24').containsIP('192.168.1.1') // returns false +// cidr('192.168.0.0/16').containsCIDR(cidr('192.168.10.0/24')) // returns true +// cidr('192.168.1.0/24').containsCIDR(cidr('192.168.2.0/24')) // returns false +// cidr('192.168.0.0/16').containsCIDR('192.168.10.0/24') // returns true +// cidr('192.168.1.0/24').containsCIDR('192.168.2.0/24') // returns false +// cidr('192.168.0.1/24').ip() // returns ipAddr('192.168.0.1') +// cidr('192.168.0.1/24').ip().family() // returns '4' +// cidr('::1/128').ip() // returns ipAddr('::1') +// cidr('::1/128').ip().family() // returns '6' +// cidr('192.168.0.0/24').masked() // returns cidr('192.168.0.0/24') +// cidr('192.168.0.1/24').masked() // returns cidr('192.168.0.0/24') +// cidr('192.168.0.0/24') == cidr('192.168.0.0/24').masked() // returns true, CIDR was already in canonical format +// cidr('192.168.0.1/24') == cidr('192.168.0.1/24').masked() // returns false, CIDR was not in canonical format +// cidr('192.168.0.0/16').prefixLength() // returns 16 +// cidr('::1/128').prefixLength() // returns 128 +func CIDR() cel.EnvOption { + return cel.Lib(cidrsLib) +} + +var cidrsLib = &cidrs{} + +type cidrs struct{} + +func (*cidrs) LibraryName() string { + return "net.cidr" +} + +var cidrLibraryDecls = map[string][]cel.FunctionOpt{ + "cidr": { + cel.Overload("string_to_cidr", []*cel.Type{cel.StringType}, apiservercel.CIDRType, + cel.UnaryBinding(stringToCIDR)), + }, + "containsIP": { + cel.MemberOverload("cidr_contains_ip_string", []*cel.Type{apiservercel.CIDRType, cel.StringType}, cel.BoolType, + cel.BinaryBinding(cidrContainsIPString)), + cel.MemberOverload("cidr_contains_ip_ip", []*cel.Type{apiservercel.CIDRType, apiservercel.IPType}, cel.BoolType, + cel.BinaryBinding(cidrContainsIP)), + }, + "containsCIDR": { + cel.MemberOverload("cidr_contains_cidr_string", []*cel.Type{apiservercel.CIDRType, cel.StringType}, cel.BoolType, + cel.BinaryBinding(cidrContainsCIDRString)), + cel.MemberOverload("cidr_contains_cidr", []*cel.Type{apiservercel.CIDRType, apiservercel.CIDRType}, cel.BoolType, + cel.BinaryBinding(cidrContainsCIDR)), + }, + "ip": { + cel.MemberOverload("cidr_ip", []*cel.Type{apiservercel.CIDRType}, apiservercel.IPType, + cel.UnaryBinding(cidrToIP)), + }, + "prefixLength": { + cel.MemberOverload("cidr_prefix_length", []*cel.Type{apiservercel.CIDRType}, cel.IntType, + cel.UnaryBinding(prefixLength)), + }, + "masked": { + cel.MemberOverload("cidr_masked", []*cel.Type{apiservercel.CIDRType}, apiservercel.CIDRType, + cel.UnaryBinding(masked)), + }, + "isCIDR": { + cel.Overload("is_cidr", []*cel.Type{cel.StringType}, cel.BoolType, + cel.UnaryBinding(isCIDR)), + }, + "string": { + cel.Overload("cidr_to_string", []*cel.Type{apiservercel.CIDRType}, cel.StringType, + cel.UnaryBinding(cidrToString)), + }, +} + +func (*cidrs) CompileOptions() []cel.EnvOption { + options := []cel.EnvOption{cel.Types(apiservercel.CIDRType), + cel.Variable(apiservercel.CIDRType.TypeName(), types.NewTypeTypeWithParam(apiservercel.CIDRType)), + } + for name, overloads := range cidrLibraryDecls { + options = append(options, cel.Function(name, overloads...)) + } + return options +} + +func (*cidrs) ProgramOptions() []cel.ProgramOption { + return []cel.ProgramOption{} +} + +func stringToCIDR(arg ref.Val) ref.Val { + s, ok := 
arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	net, err := parseCIDR(s)
+	if err != nil {
+		return types.NewErr("network address parse error during conversion from string: %v", err)
+	}
+
+	return apiservercel.CIDR{
+		Prefix: net,
+	}
+}
+
+func cidrToString(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.String(cidr.Prefix.String())
+}
+
+func cidrContainsIPString(arg ref.Val, other ref.Val) ref.Val {
+	return cidrContainsIP(arg, stringToIP(other))
+}
+
+func cidrContainsCIDRString(arg ref.Val, other ref.Val) ref.Val {
+	return cidrContainsCIDR(arg, stringToCIDR(other))
+}
+
+func cidrContainsIP(arg ref.Val, other ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	ip, ok := other.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	return types.Bool(cidr.Contains(ip.Addr))
+}
+
+func cidrContainsCIDR(arg ref.Val, other ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	containsCIDR, ok := other.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+
+	// Mask the other CIDR's address with this CIDR's prefix length before comparing,
+	// so that containment matches the documented examples.
+	equalMasked := cidr.Prefix.Masked() == netip.PrefixFrom(containsCIDR.Prefix.Addr(), cidr.Prefix.Bits()).Masked()
+	return types.Bool(equalMasked && cidr.Prefix.Bits() <= containsCIDR.Prefix.Bits())
+}
+
+func prefixLength(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Int(cidr.Prefix.Bits())
+}
+
+func isCIDR(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	_, err := parseCIDR(s)
+	return types.Bool(err == nil)
+}
+
+func cidrToIP(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return apiservercel.IP{
+		Addr: cidr.Prefix.Addr(),
+	}
+}
+
+func masked(arg ref.Val) ref.Val {
+	cidr, ok := arg.(apiservercel.CIDR)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	maskedCIDR := cidr.Prefix.Masked()
+	return apiservercel.CIDR{
+		Prefix: maskedCIDR,
+	}
+}
+
+// parseCIDR parses a string into a CIDR.
+// We use this function to parse CIDR notation in the CEL library
+// so that we can share the common logic of rejecting strings
+// that are IPv4-mapped IPv6 addresses or contain non-zero bits after the mask.
+func parseCIDR(raw string) (netip.Prefix, error) {
+	net, err := netip.ParsePrefix(raw)
+	if err != nil {
+		return netip.Prefix{}, fmt.Errorf("network address parse error during conversion from string: %v", err)
+	}
+
+	if net.Addr().Is4In6() {
+		return netip.Prefix{}, fmt.Errorf("IPv4-mapped IPv6 address %q is not allowed", raw)
+	}
+
+	return net, nil
+}
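+
+// Editorial sketch, not upstream code: exposing this library in a CEL
+// environment. IP() (defined in ip.go) is registered alongside CIDR() here
+// because the string overloads above convert through IP/CIDR values.
+//
+//	env, err := cel.NewEnv(CIDR(), IP())
+//	if err != nil { /* handle */ }
+//	ast, iss := env.Compile(`cidr('192.168.0.0/16').containsCIDR('192.168.10.0/24')`)
+//	if iss.Err() != nil { /* handle */ }
+//	prg, _ := env.Program(ast)
+//	out, _, _ := prg.Eval(cel.NoVars()) // types.Bool(true)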
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/cost.go b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go
new file mode 100644
index 00000000..b7168630
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/cost.go
@@ -0,0 +1,581 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"math"
+
+	"github.com/google/cel-go/checker"
+	"github.com/google/cel-go/common"
+	"github.com/google/cel-go/common/ast"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+
+	"k8s.io/apiserver/pkg/cel"
+)
+
+// panicOnUnknown makes cost estimate functions panic on unrecognized functions.
+// This is only set to true for unit tests.
+var panicOnUnknown = false
+
+// knownUnhandledFunctions is a list of functions used in cost tests that are not handled by CostEstimator.
+var knownUnhandledFunctions = map[string]bool{
+	"uint":          true,
+	"duration":      true,
+	"bytes":         true,
+	"timestamp":     true,
+	"value":         true,
+	"_==_":          true,
+	"_&&_":          true,
+	"_>_":           true,
+	"!_":            true,
+	"strings.quote": true,
+}
+
+// CostEstimator implements CEL's interpretable.ActualCostEstimator and checker.CostEstimator.
+type CostEstimator struct {
+	// SizeEstimator provides a CostEstimator.EstimateSize that this CostEstimator will delegate size estimation
+	// calculations to if the size is not well known (i.e. not a constant).
+	SizeEstimator checker.CostEstimator
+}
+
+const (
+	// shortest repeatable selector requirement that allocates a values slice is 2 characters: k,
+	selectorLengthToRequirementCount = float64(.5)
+	// the expensive parts to represent each requirement are a struct and a values slice
+	costPerRequirement = float64(common.ListCreateBaseCost + common.StructCreateBaseCost)
+)
+
+// a selector consists of a list of requirements held in a slice
+var baseSelectorCost = checker.CostEstimate{Min: common.ListCreateBaseCost, Max: common.ListCreateBaseCost}
+
+func selectorCostEstimate(selectorLength checker.SizeEstimate) checker.CostEstimate {
+	parseCost := selectorLength.MultiplyByCostFactor(common.StringTraversalCostFactor)
+
+	requirementCount := selectorLength.MultiplyByCostFactor(selectorLengthToRequirementCount)
+	requirementCost := requirementCount.MultiplyByCostFactor(costPerRequirement)
+
+	return baseSelectorCost.Add(parseCost).Add(requirementCost)
+}
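+
+// Worked example (editorial; assumes cel-go's cost constants, i.e.
+// ListCreateBaseCost=10, StructCreateBaseCost=40, StringTraversalCostFactor=0.1):
+// a 40-character selector estimates as parseCost = ceil(40*0.1) = 4,
+// requirementCount = 40*0.5 = 20, requirementCost = 20*(10+40) = 1000, so the
+// total is 10 + 4 + 1000 = 1014.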
+func (l *CostEstimator) CallCost(function, overloadId string, args []ref.Val, result ref.Val) *uint64 {
+	switch function {
+	case "check":
+		// An authorization check has a fixed cost
+		// This cost is set to allow for only two authorization checks per expression
+		cost := uint64(350000)
+		return &cost
+	case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored":
+		// All authorization builder and accessor functions have a nominal cost
+		cost := uint64(1)
+		return &cost
+	case "fieldSelector", "labelSelector":
+		// field and label selector parse is a string parse into a structured set of requirements
+		if len(args) >= 2 {
+			selectorLength := actualSize(args[1])
+			cost := selectorCostEstimate(checker.SizeEstimate{Min: selectorLength, Max: selectorLength})
+			return &cost.Max
+		}
+	case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf":
+		var cost uint64
+		if len(args) > 0 {
+			cost += traversalCost(args[0]) // these O(n) operations all cost roughly the cost of a single traversal
+		}
+		return &cost
+	case "url", "lowerAscii", "upperAscii", "substring", "trim":
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "replace", "split":
+		if len(args) >= 1 {
+			// cost is the traversal plus the construction of the result
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * 2 * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "join":
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(result)) * 2 * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "find", "findAll":
+		if len(args) >= 2 {
+			strCost := uint64(math.Ceil((1.0 + float64(actualSize(args[0]))) * common.StringTraversalCostFactor))
+			// We don't know how many expressions are in the regex, just the string length (a huge
+			// improvement here would be to somehow count the number of expressions in the regex or
+			// how many states are in the regex state machine and use that to measure regex cost).
+			// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+			// in length.
+			regexCost := uint64(math.Ceil(float64(actualSize(args[1])) * common.RegexStringLengthCostFactor))
+			cost := strCost * regexCost
+			return &cost
+		}
+	case "cidr", "isIP", "isCIDR":
+		// IP and CIDR parsing is a string traversal.
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "ip":
+		// IP and CIDR parsing is a string traversal.
+		if len(args) >= 1 {
+			if overloadId == "cidr_ip" {
+				// The IP member of the CIDR object is just accessing a field.
+				// Nominal cost.
+				cost := uint64(1)
+				return &cost
+			}
+
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "ip.isCanonical":
+		if len(args) >= 1 {
+			// We have to parse the string and then compare the parsed string to the original string.
+			// So we double the cost of parsing the string.
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * 2 * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast":
+		// IP and CIDR accessors are nominal cost.
+		cost := uint64(1)
+		return &cost
+	case "containsIP":
+		if len(args) >= 2 {
+			cidrSize := actualSize(args[0])
+			otherSize := actualSize(args[1])
+
+			// This is the base cost of comparing two byte lists.
+			// We will compare only up to the length of the CIDR prefix in bytes, so use the cidrSize twice.
+			cost := uint64(math.Ceil(float64(cidrSize+cidrSize) * common.StringTraversalCostFactor))
+
+			if overloadId == "cidr_contains_ip_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				cost += uint64(math.Ceil(float64(otherSize) * common.StringTraversalCostFactor))
+			}
+
+			return &cost
+		}
+	case "containsCIDR":
+		if len(args) >= 2 {
+			cidrSize := actualSize(args[0])
+			otherSize := actualSize(args[1])
+
+			// This is the base cost of comparing two byte lists.
+			// We will compare only up to the length of the CIDR prefix in bytes, so use the cidrSize twice.
+			cost := uint64(math.Ceil(float64(cidrSize+cidrSize) * common.StringTraversalCostFactor))
+
+			// As we are comparing if a CIDR is within another CIDR, we first mask the base CIDR and
+			// also compare the CIDR bits.
+			// This has an additional cost of the length of the IP being traversed again, plus 1.
+			cost += uint64(math.Ceil(float64(cidrSize)*common.StringTraversalCostFactor)) + 1
+
+			if overloadId == "cidr_contains_cidr_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				cost += uint64(math.Ceil(float64(otherSize) * common.StringTraversalCostFactor))
+			}
+
+			return &cost
+		}
+	case "quantity", "isQuantity":
+		if len(args) >= 1 {
+			cost := uint64(math.Ceil(float64(actualSize(args[0])) * common.StringTraversalCostFactor))
+			return &cost
+		}
+	case "validate":
+		if len(args) >= 2 {
+			format, isFormat := args[0].Value().(*cel.Format)
+			if isFormat {
+				strSize := actualSize(args[1])
+
+				// Don't have access to the underlying regex, estimate a long regexp
+				regexSize := format.MaxRegexSize
+
+				// Copied from CEL implementation for regex cost
+				//
+				// https://swtch.com/~rsc/regexp/regexp1.html applies to RE2 implementation supported by CEL
+				// Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
+				// in case where string is empty but regex is still expensive.
+				strCost := uint64(math.Ceil((1.0 + float64(strSize)) * common.StringTraversalCostFactor))
+				// We don't know how many expressions are in the regex, just the string length (a huge
+				// improvement here would be to somehow count the number of expressions in the regex or
+				// how many states are in the regex state machine and use that to measure regex cost).
+				// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+				// in length.
+				regexCost := uint64(math.Ceil(float64(regexSize) * common.RegexStringLengthCostFactor))
+				cost := strCost * regexCost
+				return &cost
+			}
+		}
+	case "format.named":
+		// Simply a dictionary lookup
+		cost := uint64(1)
+		return &cost
+	case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub":
+		cost := uint64(1)
+		return &cost
+	case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery":
+		// url accessors
+		cost := uint64(1)
+		return &cost
+	}
+	if panicOnUnknown && !knownUnhandledFunctions[function] {
+		panic(fmt.Errorf("CallCost: unhandled function %q or args %v", function, args))
+	}
+	return nil
+}
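+
+// Editorial sketch, not upstream code: how an estimator like this is typically
+// attached to a compiled CEL program so evaluation is charged against a budget
+// (option names are cel-go's; the limit value is illustrative).
+//
+//	prg, err := env.Program(ast,
+//		cel.CostTracking(&CostEstimator{}), // runtime cost via CallCost
+//		cel.CostLimit(1000000),             // evaluation aborts once exceeded
+//	)
+//	_, details, err := prg.Eval(vars)
+//	actual := details.ActualCost() // *uint64, observed evaluation cost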
+func (l *CostEstimator) EstimateCallCost(function, overloadId string, target *checker.AstNode, args []checker.AstNode) *checker.CallEstimate {
+	// WARNING: Any changes to this code impact API compatibility! The estimated cost is used to determine which CEL rules may be written to a
+	// CRD and any change (cost increases and cost decreases) are breaking.
+	switch function {
+	case "check":
+		// An authorization check has a fixed cost
+		// This cost is set to allow for only two authorization checks per expression
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 350000, Max: 350000}}
+	case "serviceAccount", "path", "group", "resource", "subresource", "namespace", "name", "allowed", "reason", "error", "errored":
+		// All authorization builder and accessor functions have a nominal cost
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "fieldSelector", "labelSelector":
+		// field and label selector parse is a string parse into a structured set of requirements
+		if len(args) == 1 {
+			return &checker.CallEstimate{CostEstimate: selectorCostEstimate(l.sizeEstimate(args[0]))}
+		}
+	case "isSorted", "sum", "max", "min", "indexOf", "lastIndexOf":
+		if target != nil {
+			// Charge 1 cost for comparing each element in the list
+			elCost := checker.CostEstimate{Min: 1, Max: 1}
+			// If the list contains strings or bytes, add the cost of traversing all the strings/bytes as a way
+			// of estimating the additional comparison cost.
+			if elNode := l.listElementNode(*target); elNode != nil {
+				k := elNode.Type().Kind()
+				if k == types.StringKind || k == types.BytesKind {
+					sz := l.sizeEstimate(elNode)
+					elCost = elCost.Add(sz.MultiplyByCostFactor(common.StringTraversalCostFactor))
+				}
+				return &checker.CallEstimate{CostEstimate: l.sizeEstimate(*target).MultiplyByCost(elCost)}
+			} else { // the target is a string, which is supported by indexOf and lastIndexOf
+				return &checker.CallEstimate{CostEstimate: l.sizeEstimate(*target).MultiplyByCostFactor(common.StringTraversalCostFactor)}
+			}
+		}
+	case "url":
+		if len(args) == 1 {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+		}
+	case "lowerAscii", "upperAscii", "substring", "trim":
+		if target != nil {
+			sz := l.sizeEstimate(*target)
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor), ResultSize: &sz}
+		}
+	case "replace":
+		if target != nil && len(args) >= 2 {
+			sz := l.sizeEstimate(*target)
+			toReplaceSz := l.sizeEstimate(args[0])
+			replaceWithSz := l.sizeEstimate(args[1])
+
+			var replaceCount, retainedSz checker.SizeEstimate
+			// find the longest replacement:
+			if toReplaceSz.Min == 0 {
+				// if the string being replaced is empty, replace surrounds all characters in the input string with the replacement.
+				if sz.Max < math.MaxUint64 {
+					replaceCount.Max = sz.Max + 1
+				} else {
+					replaceCount.Max = sz.Max
+				}
+				// Include the longest possible original string length.
+				retainedSz.Max = sz.Max
+			} else if replaceWithSz.Max <= toReplaceSz.Min {
+				// If the replacement does not make the result longer, use the original string length.
+				replaceCount.Max = 0
+				retainedSz.Max = sz.Max
+			} else {
+				// Replace the smallest possible substrings with the largest possible replacement
+				// as many times as possible.
+				replaceCount.Max = uint64(math.Ceil(float64(sz.Max) / float64(toReplaceSz.Min)))
+			}
+
+			// find the shortest replacement:
+			if toReplaceSz.Max == 0 {
+				// if the string being replaced is empty, replace surrounds all characters in the input string with the replacement.
+				if sz.Min < math.MaxUint64 {
+					replaceCount.Min = sz.Min + 1
+				} else {
+					replaceCount.Min = sz.Min
+				}
+				// Include the shortest possible original string length.
+				retainedSz.Min = sz.Min
+			} else if toReplaceSz.Max <= replaceWithSz.Min {
+				// If the replacement does not make the result shorter, use the original string length.
+				replaceCount.Min = 0
+				retainedSz.Min = sz.Min
+			} else {
+				// Replace the largest possible substrings with the smallest possible replacement
+				// as many times as possible.
+				replaceCount.Min = uint64(math.Ceil(float64(sz.Min) / float64(toReplaceSz.Max)))
+			}
+			size := replaceCount.Multiply(replaceWithSz).Add(retainedSz)
+
+			// cost is the traversal plus the construction of the result
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor), ResultSize: &size}
+		}
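+	// Worked example for the "replace" estimate above (editorial; assumes
+	// cel-go's StringTraversalCostFactor of 0.1): for a target sized
+	// {Min:0, Max:10}, toReplace "" (size 0) and replacement "ab" (size 2),
+	// replaceCount = {Min:1, Max:11} and retainedSz = {Min:0, Max:10}, so
+	// ResultSize = {Min:2, Max:32} and the cost estimate is {Min:0, Max:2}.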
+	case "split":
+		if target != nil {
+			sz := l.sizeEstimate(*target)
+
+			// Worst case size is when a separator of "" is used, and each char is returned as a list element.
+			max := sz.Max
+			if len(args) > 1 {
+				if v := args[1].Expr().AsLiteral(); v != nil {
+					if i, ok := v.Value().(int64); ok {
+						max = uint64(i)
+					}
+				}
+			}
+			// Cost is the traversal plus the construction of the result.
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor), ResultSize: &checker.SizeEstimate{Min: 0, Max: max}}
+		}
+	case "join":
+		if target != nil {
+			var sz checker.SizeEstimate
+			listSize := l.sizeEstimate(*target)
+			if elNode := l.listElementNode(*target); elNode != nil {
+				elemSize := l.sizeEstimate(elNode)
+				sz = listSize.Multiply(elemSize)
+			}
+
+			if len(args) > 0 {
+				sepSize := l.sizeEstimate(args[0])
+				minSeparators := uint64(0)
+				maxSeparators := uint64(0)
+				if listSize.Min > 0 {
+					minSeparators = listSize.Min - 1
+				}
+				if listSize.Max > 0 {
+					maxSeparators = listSize.Max - 1
+				}
+				sz = sz.Add(sepSize.Multiply(checker.SizeEstimate{Min: minSeparators, Max: maxSeparators}))
+			}
+
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor), ResultSize: &sz}
+		}
+	case "find", "findAll":
+		if target != nil && len(args) >= 1 {
+			sz := l.sizeEstimate(*target)
+			// Add one to string length for purposes of cost calculation to prevent product of string and regex to be 0
+			// in case where string is empty but regex is still expensive.
+			strCost := sz.Add(checker.SizeEstimate{Min: 1, Max: 1}).MultiplyByCostFactor(common.StringTraversalCostFactor)
+			// We don't know how many expressions are in the regex, just the string length (a huge
+			// improvement here would be to somehow count the number of expressions in the regex or
+			// how many states are in the regex state machine and use that to measure regex cost).
+			// For now, we're making a guess that each expression in a regex is typically at least 4 chars
+			// in length.
+			regexCost := l.sizeEstimate(args[0]).MultiplyByCostFactor(common.RegexStringLengthCostFactor)
+			// worst case size of result is that every char is returned as separate find result.
+			return &checker.CallEstimate{CostEstimate: strCost.Multiply(regexCost), ResultSize: &checker.SizeEstimate{Min: 0, Max: sz.Max}}
+		}
+	case "cidr", "isIP", "isCIDR":
+		if target != nil {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+		}
+	case "ip":
+		if target != nil && len(args) >= 1 {
+			if overloadId == "cidr_ip" {
+				// The IP member of the CIDR object is just accessing a field.
+				// Nominal cost.
+				return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+			}
+
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+		} else if target != nil {
+			// The IP member of a CIDR is just accessing a field, nominal cost.
+			return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+		}
+	case "ip.isCanonical":
+		if target != nil && len(args) >= 1 {
+			sz := l.sizeEstimate(args[0])
+			// We have to parse the string and then compare the parsed string to the original string.
+			// So we double the cost of parsing the string.
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(2 * common.StringTraversalCostFactor)}
+		}
+	case "masked", "prefixLength", "family", "isUnspecified", "isLoopback", "isLinkLocalMulticast", "isLinkLocalUnicast", "isGlobalUnicast":
+		// IP and CIDR accessors are nominal cost.
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "containsIP":
+		if target != nil && len(args) >= 1 {
+			// The base cost of the function is the cost of comparing two byte lists.
+			// The byte lists will be either ipv4 or ipv6 so will have a length of 4, or 16 bytes.
+			sz := checker.SizeEstimate{Min: 4, Max: 16}
+
+			// We have to compare the two strings to determine if the CIDR/IP is in the other CIDR.
+			ipCompCost := sz.Add(sz).MultiplyByCostFactor(common.StringTraversalCostFactor)
+
+			if overloadId == "cidr_contains_ip_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				ipCompCost = ipCompCost.Add(checker.CostEstimate(l.sizeEstimate(args[0])).MultiplyByCostFactor(common.StringTraversalCostFactor))
+			}
+
+			return &checker.CallEstimate{CostEstimate: ipCompCost}
+		}
+	case "containsCIDR":
+		if target != nil && len(args) >= 1 {
+			// The base cost of the function is the cost of comparing two byte lists.
+			// The byte lists will be either ipv4 or ipv6 so will have a length of 4, or 16 bytes.
+			sz := checker.SizeEstimate{Min: 4, Max: 16}
+
+			// We have to compare the two strings to determine if the CIDR/IP is in the other CIDR.
+			ipCompCost := sz.Add(sz).MultiplyByCostFactor(common.StringTraversalCostFactor)
+
+			// As we are comparing if a CIDR is within another CIDR, we first mask the base CIDR and
+			// also compare the CIDR bits.
+			// This has an additional cost of the length of the IP being traversed again, plus 1.
+			ipCompCost = ipCompCost.Add(sz.MultiplyByCostFactor(common.StringTraversalCostFactor))
+			ipCompCost = ipCompCost.Add(checker.CostEstimate{Min: 1, Max: 1})
+
+			if overloadId == "cidr_contains_cidr_string" {
+				// If we are comparing a string, we must parse the string into the right type, so add the cost of traversing the string again.
+				ipCompCost = ipCompCost.Add(checker.CostEstimate(l.sizeEstimate(args[0])).MultiplyByCostFactor(common.StringTraversalCostFactor))
+			}
+
+			return &checker.CallEstimate{CostEstimate: ipCompCost}
+		}
+	case "quantity", "isQuantity":
+		if target != nil {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor)}
+		}
+	case "validate":
+		if target != nil {
+			sz := l.sizeEstimate(args[0])
+			return &checker.CallEstimate{CostEstimate: sz.MultiplyByCostFactor(common.StringTraversalCostFactor).MultiplyByCostFactor(cel.MaxNameFormatRegexSize * common.RegexStringLengthCostFactor)}
+		}
+	case "format.named":
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "sign", "asInteger", "isInteger", "asApproximateFloat", "isGreaterThan", "isLessThan", "compareTo", "add", "sub":
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	case "getScheme", "getHostname", "getHost", "getPort", "getEscapedPath", "getQuery":
+		// url accessors
+		return &checker.CallEstimate{CostEstimate: checker.CostEstimate{Min: 1, Max: 1}}
+	}
+	if panicOnUnknown && !knownUnhandledFunctions[function] {
+		panic(fmt.Errorf("EstimateCallCost: unhandled function %q, target %v, args %v", function, target, args))
+	}
+	return nil
+}
+
+func actualSize(value ref.Val) uint64 {
+	if sz, ok := value.(traits.Sizer); ok {
+		return uint64(sz.Size().(types.Int))
+	}
+	if panicOnUnknown {
+		// debug.PrintStack()
+		panic(fmt.Errorf("actualSize: non-sizer type %T", value))
+	}
+	return 1
+}
+
+func (l *CostEstimator) sizeEstimate(t checker.AstNode) checker.SizeEstimate {
+	if sz := t.ComputedSize(); sz != nil {
+		return *sz
+	}
+	if sz := l.EstimateSize(t); sz != nil {
+		return *sz
+	}
+	return checker.SizeEstimate{Min: 0, Max: math.MaxUint64}
+}
+
+func (l *CostEstimator) listElementNode(list checker.AstNode) checker.AstNode {
+	if params := list.Type().Parameters(); len(params) > 0 {
+		lt := params[0]
+		nodePath := list.Path()
+		if nodePath != nil {
+			// Provide path if we have it so that an OpenAPIv3 maxLength validation can be looked up, if it exists
+			// for this node.
+			path := make([]string, len(nodePath)+1)
+			copy(path, nodePath)
+			path[len(nodePath)] = "@items"
+			return &itemsNode{path: path, t: lt, expr: nil}
+		} else {
+			// Provide just the type if no path is available so that worst case size can be looked up based on type.
+			return &itemsNode{t: lt, expr: nil}
+		}
+	}
+	return nil
+}
+
+func (l *CostEstimator) EstimateSize(element checker.AstNode) *checker.SizeEstimate {
+	if l.SizeEstimator != nil {
+		return l.SizeEstimator.EstimateSize(element)
+	}
+	return nil
+}
+
+type itemsNode struct {
+	path []string
+	t    *types.Type
+	expr ast.Expr
+}
+
+func (i *itemsNode) Path() []string {
+	return i.path
+}
+
+func (i *itemsNode) Type() *types.Type {
+	return i.t
+}
+
+func (i *itemsNode) Expr() ast.Expr {
+	return i.expr
+}
+
+func (i *itemsNode) ComputedSize() *checker.SizeEstimate {
+	return nil
+}
+
+var _ checker.AstNode = (*itemsNode)(nil)
+
+// traversalCost computes the cost of traversing a ref.Val as a data tree.
+func traversalCost(v ref.Val) uint64 {
+	// TODO: This could potentially be optimized by sampling maps and lists instead of traversing.
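+	// Worked example (editorial; assumes cel-go's StringTraversalCostFactor of
+	// 0.1): a list of three 10-character strings costs 3 * uint64(10*0.1) = 3,
+	// while scalars other than strings/bytes cost 1 each.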
+	switch vt := v.(type) {
+	case types.String:
+		return uint64(float64(len(string(vt))) * common.StringTraversalCostFactor)
+	case types.Bytes:
+		return uint64(float64(len([]byte(vt))) * common.StringTraversalCostFactor)
+	case traits.Lister:
+		cost := uint64(0)
+		for it := vt.Iterator(); it.HasNext() == types.True; {
+			i := it.Next()
+			cost += traversalCost(i)
+		}
+		return cost
+	case traits.Mapper: // maps and objects
+		cost := uint64(0)
+		for it := vt.Iterator(); it.HasNext() == types.True; {
+			k := it.Next()
+			cost += traversalCost(k) + traversalCost(vt.Get(k))
+		}
+		return cost
+	default:
+		return 1
+	}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/format.go b/vendor/k8s.io/apiserver/pkg/cel/library/format.go
new file mode 100644
index 00000000..c051f33c
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/format.go
@@ -0,0 +1,270 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/asaskevich/govalidator"
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
+	"k8s.io/apimachinery/pkg/util/validation"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+	"k8s.io/kube-openapi/pkg/validation/strfmt"
+)
+
+// Format provides a CEL library exposing common named Kubernetes string
+// validations. Can be used in CRD ValidationRules messageExpression.
+//
+// Example:
+//
+//	rule:              format.dns1123Label().validate(object.metadata.name).hasValue()
+//	messageExpression: format.dns1123Label().validate(object.metadata.name).value().join("\n")
+//
+// format.named(name: string) -> ?Format
+//
+// Returns the Format with the given name, if it exists. Otherwise, optional.none
+// Allowed names are:
+//   - `dns1123Label`
+//   - `dns1123Subdomain`
+//   - `dns1035Label`
+//   - `qualifiedName`
+//   - `dns1123LabelPrefix`
+//   - `dns1123SubdomainPrefix`
+//   - `dns1035LabelPrefix`
+//   - `labelValue`
+//   - `uri`
+//   - `uuid`
+//   - `byte`
+//   - `date`
+//   - `datetime`
+//
+// format.<formatName>() -> Format
+//
+// Convenience functions for all the named formats are also available
+//
+// Examples:
+//
+//	format.dns1123Label().validate("my-label-name")
+//	format.dns1123Subdomain().validate("apiextensions.k8s.io")
+//	format.dns1035Label().validate("my-label-name")
+//	format.qualifiedName().validate("apiextensions.k8s.io/v1beta1")
+//	format.dns1123LabelPrefix().validate("my-label-prefix-")
+//	format.dns1123SubdomainPrefix().validate("mysubdomain.prefix.-")
+//	format.dns1035LabelPrefix().validate("my-label-prefix-")
+//	format.uri().validate("http://example.com")
+//	  Uses same pattern as isURL, but returns an error
+//	format.uuid().validate("123e4567-e89b-12d3-a456-426614174000")
+//	format.byte().validate("aGVsbG8=")
+//	format.date().validate("2021-01-01")
+//	format.datetime().validate("2021-01-01T00:00:00Z")
+//
+// <Format>.validate(str: string) -> ?list<string>
+//
+// Validates the given string against the given format. Returns optional.none
+// if the string is valid, otherwise a list of validation error strings.
+func Format() cel.EnvOption {
+	return cel.Lib(formatLib)
+}
+
+var formatLib = &format{}
+
+type format struct{}
+
+func (*format) LibraryName() string {
+	return "format"
+}
+
+func ZeroArgumentFunctionBinding(binding func() ref.Val) decls.OverloadOpt {
+	return func(o *decls.OverloadDecl) (*decls.OverloadDecl, error) {
+		wrapped, err := decls.FunctionBinding(func(values ...ref.Val) ref.Val { return binding() })(o)
+		if err != nil {
+			return nil, err
+		}
+		if len(wrapped.ArgTypes()) != 0 {
+			return nil, fmt.Errorf("function binding must have 0 arguments")
+		}
+		return o, nil
+	}
+}
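+
+// Editorial note (not upstream text): CompileOptions below registers one
+// zero-argument convenience function per ConstantFormats entry, so the map key
+// "dns1123Label" becomes the CEL function `format.dns1123Label()`:
+//
+//	format.dns1123Label().validate('My_Invalid_Name').hasValue() // true: errors exist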
+func (*format) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(formatLibraryDecls))
+	for name, overloads := range formatLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	for name, constantValue := range ConstantFormats {
+		prefixedName := "format." + name
+		options = append(options, cel.Function(prefixedName, cel.Overload(prefixedName, []*cel.Type{}, apiservercel.FormatType, ZeroArgumentFunctionBinding(func() ref.Val {
+			return constantValue
+		}))))
+	}
+	return options
+}
+
+func (*format) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+var ConstantFormats map[string]*apiservercel.Format = map[string]*apiservercel.Format{
+	"dns1123Label": {
+		Name:         "DNS1123Label",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, false) },
+		MaxRegexSize: 30,
+	},
+	"dns1123Subdomain": {
+		Name:         "DNS1123Subdomain",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, false) },
+		MaxRegexSize: 60,
+	},
+	"dns1035Label": {
+		Name:         "DNS1035Label",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, false) },
+		MaxRegexSize: 30,
+	},
+	"qualifiedName": {
+		Name:         "QualifiedName",
+		ValidateFunc: validation.IsQualifiedName,
+		MaxRegexSize: 60, // uses subdomain regex
+	},
+
+	"dns1123LabelPrefix": {
+		Name:         "DNS1123LabelPrefix",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSLabel(s, true) },
+		MaxRegexSize: 30,
+	},
+	"dns1123SubdomainPrefix": {
+		Name:         "DNS1123SubdomainPrefix",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNSSubdomain(s, true) },
+		MaxRegexSize: 60,
+	},
+	"dns1035LabelPrefix": {
+		Name:         "DNS1035LabelPrefix",
+		ValidateFunc: func(s string) []string { return apimachineryvalidation.NameIsDNS1035Label(s, true) },
+		MaxRegexSize: 30,
+	},
+	"labelValue": {
+		Name:         "LabelValue",
+		ValidateFunc: validation.IsValidLabelValue,
+		MaxRegexSize: 40,
+	},
+
+	// CRD formats
+	// Implementations sourced from strfmt, which kube-openapi uses as its
+	// format library. There are other CRD formats supported, but they are
+	// covered by other portions of the CEL library (like IP/CIDR), or their
+	// use is discouraged (like bsonobjectid, email, etc)
+	"uri": {
+		Name: "URI",
+		ValidateFunc: func(s string) []string {
+			// Directly call ParseRequestURI since we can get a better error message
+			_, err := url.ParseRequestURI(s)
+			if err != nil {
+				return []string{err.Error()}
+			}
+			return nil
+		},
+		// Use govalidator url regex to estimate, since ParseRequestURI
+		// doesn't use regex
+		MaxRegexSize: len(govalidator.URL),
+	},
+	"uuid": {
+		Name: "uuid",
+		ValidateFunc: func(s string) []string {
+			if !strfmt.Default.Validates("uuid", s) {
+				return []string{"does not match the UUID format"}
+			}
+			return nil
+		},
+		MaxRegexSize: len(strfmt.UUIDPattern),
+	},
+	"byte": {
+		Name: "byte",
+		ValidateFunc: func(s string) []string {
+			if !strfmt.Default.Validates("byte", s) {
+				return []string{"invalid base64"}
+			}
+			return nil
+		},
+		MaxRegexSize: len(govalidator.Base64),
+	},
+	"date": {
+		Name: "date",
+		ValidateFunc: func(s string) []string {
+			if !strfmt.Default.Validates("date", s) {
+				return []string{"invalid date"}
+			}
+			return nil
+		},
+		// Estimated regex size for RFC3339FullDate which is
Assume a date-time pattern is longer + // so use that to conservatively estimate this + MaxRegexSize: len(strfmt.DateTimePattern), + }, + "datetime": { + Name: "datetime", + ValidateFunc: func(s string) []string { + if !strfmt.Default.Validates("datetime", s) { + return []string{"invalid datetime"} + } + return nil + }, + MaxRegexSize: len(strfmt.DateTimePattern), + }, +} + +var formatLibraryDecls = map[string][]cel.FunctionOpt{ + "validate": { + cel.MemberOverload("format-validate", []*cel.Type{apiservercel.FormatType, cel.StringType}, cel.OptionalType(cel.ListType(cel.StringType)), cel.BinaryBinding(formatValidate)), + }, + "format.named": { + cel.Overload("format-named", []*cel.Type{cel.StringType}, cel.OptionalType(apiservercel.FormatType), cel.UnaryBinding(func(name ref.Val) ref.Val { + nameString, ok := name.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(name) + } + + f, ok := ConstantFormats[nameString] + if !ok { + return types.OptionalNone + } + return types.OptionalOf(f) + })), + }, +} + +func formatValidate(arg1, arg2 ref.Val) ref.Val { + f, ok := arg1.Value().(*apiservercel.Format) + if !ok { + return types.MaybeNoSuchOverloadErr(arg1) + } + + str, ok := arg2.Value().(string) + if !ok { + return types.MaybeNoSuchOverloadErr(arg2) + } + + res := f.ValidateFunc(str) + if len(res) == 0 { + return types.OptionalNone + } + return types.OptionalOf(types.NewStringList(types.DefaultTypeAdapter, res)) +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/ip.go b/vendor/k8s.io/apiserver/pkg/cel/library/ip.go new file mode 100644 index 00000000..cdfeb1da --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/library/ip.go @@ -0,0 +1,329 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package library + +import ( + "fmt" + "net/netip" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" + + apiservercel "k8s.io/apiserver/pkg/cel" +) + +// IP provides a CEL function library extension of IP address parsing functions. +// +// ip +// +// Converts a string to an IP address or results in an error if the string is not a valid IP address. +// The IP address must be an IPv4 or IPv6 address. +// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4) are not allowed. +// IP addresses with zones (e.g. fe80::1%eth0) are not allowed. +// Leading zeros in IPv4 address octets are not allowed. +// +// ip() +// +// Examples: +// +// ip('127.0.0.1') // returns an IPv4 address +// ip('::1') // returns an IPv6 address +// ip('127.0.0.256') // error +// ip(':::1') // error +// +// isIP +// +// Returns true if a string is a valid IP address. +// The IP address must be an IPv4 or IPv6 address. +// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4) are not allowed. +// IP addresses with zones (e.g. fe80::1%eth0) are not allowed. +// Leading zeros in IPv4 address octets are not allowed. 
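Since the format library is one of the new vendored CEL extensions pulled in by this dependency bump, a minimal usage sketch may help review. This is illustrative only, not part of the patch; it assumes `cel.OptionalTypes()` must be enabled because `validate` returns an optional list:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	// Format() registers format.named(), the format.<name>() convenience
	// functions, and <Format>.validate(); OptionalTypes() provides the
	// optional-value syntax (hasValue, value) that validate's result uses.
	env, err := cel.NewEnv(cel.OptionalTypes(), library.Format())
	if err != nil {
		panic(err)
	}
	// "My_Label" violates DNS-1123, so validate() returns a populated optional.
	ast, iss := env.Compile(`format.dns1123Label().validate("My_Label").hasValue()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```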
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/ip.go b/vendor/k8s.io/apiserver/pkg/cel/library/ip.go
new file mode 100644
index 00000000..cdfeb1da
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/ip.go
@@ -0,0 +1,329 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+	"net/netip"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// IP provides a CEL function library extension of IP address parsing functions.
+//
+// ip
+//
+// Converts a string to an IP address or results in an error if the string is not a valid IP address.
+// The IP address must be an IPv4 or IPv6 address.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4) are not allowed.
+// IP addresses with zones (e.g. fe80::1%eth0) are not allowed.
+// Leading zeros in IPv4 address octets are not allowed.
+//
+// ip(<string>) <IPAddr>
+//
+// Examples:
+//
+// ip('127.0.0.1') // returns an IPv4 address
+// ip('::1') // returns an IPv6 address
+// ip('127.0.0.256') // error
+// ip(':::1') // error
+//
+// isIP
+//
+// Returns true if a string is a valid IP address.
+// The IP address must be an IPv4 or IPv6 address.
+// IPv4-mapped IPv6 addresses (e.g. ::ffff:1.2.3.4) are not allowed.
+// IP addresses with zones (e.g. fe80::1%eth0) are not allowed.
+// Leading zeros in IPv4 address octets are not allowed.
+//
+// isIP(<string>) <bool>
+//
+// Examples:
+//
+// isIP('127.0.0.1') // returns true
+// isIP('::1') // returns true
+// isIP('127.0.0.256') // returns false
+// isIP(':::1') // returns false
+//
+// ip.isCanonical
+//
+// Returns true if the IP address is in its canonical form.
+// There is exactly one canonical form for every IP address, so fields containing
+// IPs in canonical form can just be treated as strings when checking for equality or uniqueness.
+//
+// ip.isCanonical(<string>) <bool>
+//
+// Examples:
+//
+// ip.isCanonical('127.0.0.1') // returns true; all valid IPv4 addresses are canonical
+// ip.isCanonical('2001:db8::abcd') // returns true
+// ip.isCanonical('2001:DB8::ABCD') // returns false
+// ip.isCanonical('2001:db8::0:0:0:abcd') // returns false
+//
+// family / isUnspecified / isLoopback / isLinkLocalMulticast / isLinkLocalUnicast / isGlobalUnicast
+//
+// - family: returns the IP address's family (IPv4 or IPv6) as an integer, either '4' or '6'.
+//
+// - isUnspecified: returns true if the IP address is the unspecified address.
+//   Either the IPv4 address "0.0.0.0" or the IPv6 address "::".
+//
+// - isLoopback: returns true if the IP address is the loopback address.
+//   Either an IPv4 address with a value of 127.x.x.x or an IPv6 address with a value of ::1.
+//
+// - isLinkLocalMulticast: returns true if the IP address is a link-local multicast address.
+//   Either an IPv4 address with a value of 224.0.0.x or an IPv6 address in the network ff00::/8.
+//
+// - isLinkLocalUnicast: returns true if the IP address is a link-local unicast address.
+//   Either an IPv4 address with a value of 169.254.x.x or an IPv6 address in the network fe80::/10.
+//
+// - isGlobalUnicast: returns true if the IP address is a global unicast address.
+//   Either an IPv4 address that is not zero or 255.255.255.255 or an IPv6 address that is not a link-local unicast, loopback or multicast address.
+//
+// Examples:
+//
+// ip('127.0.0.1').family() // returns '4'
+// ip('::1').family() // returns '6'
+// ip('127.0.0.1').family() == 4 // returns true
+// ip('::1').family() == 6 // returns true
+// ip('0.0.0.0').isUnspecified() // returns true
+// ip('127.0.0.1').isUnspecified() // returns false
+// ip('::').isUnspecified() // returns true
+// ip('::1').isUnspecified() // returns false
+// ip('127.0.0.1').isLoopback() // returns true
+// ip('192.168.0.1').isLoopback() // returns false
+// ip('::1').isLoopback() // returns true
+// ip('2001:db8::abcd').isLoopback() // returns false
+// ip('224.0.0.1').isLinkLocalMulticast() // returns true
+// ip('224.0.1.1').isLinkLocalMulticast() // returns false
+// ip('ff02::1').isLinkLocalMulticast() // returns true
+// ip('fd00::1').isLinkLocalMulticast() // returns false
+// ip('169.254.169.254').isLinkLocalUnicast() // returns true
+// ip('192.168.0.1').isLinkLocalUnicast() // returns false
+// ip('fe80::1').isLinkLocalUnicast() // returns true
+// ip('fd80::1').isLinkLocalUnicast() // returns false
+// ip('192.168.0.1').isGlobalUnicast() // returns true
+// ip('255.255.255.255').isGlobalUnicast() // returns false
+// ip('2001:db8::abcd').isGlobalUnicast() // returns true
+// ip('ff00::1').isGlobalUnicast() // returns false
+func IP() cel.EnvOption {
+	return cel.Lib(ipLib)
+}
+
+var ipLib = &ip{}
+
+type ip struct{}
+
+func (*ip) LibraryName() string {
+	return "net.ip"
+}
+
+var ipLibraryDecls = map[string][]cel.FunctionOpt{
+	"ip": {
+		cel.Overload("string_to_ip", []*cel.Type{cel.StringType}, apiservercel.IPType,
+			cel.UnaryBinding(stringToIP)),
+	},
+	"family": {
+		cel.MemberOverload("ip_family", []*cel.Type{apiservercel.IPType}, cel.IntType,
+			cel.UnaryBinding(family)),
+	},
+	"ip.isCanonical": {
+		cel.Overload("ip_is_canonical", []*cel.Type{cel.StringType}, cel.BoolType,
+			cel.UnaryBinding(ipIsCanonical)),
+	},
+	"isUnspecified": {
+		cel.MemberOverload("ip_is_unspecified", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isUnspecified)),
+	},
+	"isLoopback": {
+		cel.MemberOverload("ip_is_loopback", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isLoopback)),
+	},
+	"isLinkLocalMulticast": {
+		cel.MemberOverload("ip_is_link_local_multicast", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isLinkLocalMulticast)),
+	},
+	"isLinkLocalUnicast": {
+		cel.MemberOverload("ip_is_link_local_unicast", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isLinkLocalUnicast)),
+	},
+	"isGlobalUnicast": {
+		cel.MemberOverload("ip_is_global_unicast", []*cel.Type{apiservercel.IPType}, cel.BoolType,
+			cel.UnaryBinding(isGlobalUnicast)),
+	},
+	"isIP": {
+		cel.Overload("is_ip", []*cel.Type{cel.StringType}, cel.BoolType,
+			cel.UnaryBinding(isIP)),
+	},
+	"string": {
+		cel.Overload("ip_to_string", []*cel.Type{apiservercel.IPType}, cel.StringType,
+			cel.UnaryBinding(ipToString)),
+	},
+}
+
+func (*ip) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{cel.Types(apiservercel.IPType),
+		cel.Variable(apiservercel.IPType.TypeName(), types.NewTypeTypeWithParam(apiservercel.IPType)),
+	}
+	for name, overloads := range ipLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*ip) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func stringToIP(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	addr, err := parseIPAddr(s)
+	if err != nil {
+		// Don't add context, we control the error message already.
+		return types.NewErr("%v", err)
+	}
+
+	return apiservercel.IP{
+		Addr: addr,
+	}
+}
+
+func ipToString(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.String(ip.Addr.String())
+}
+
+func family(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	switch {
+	case ip.Addr.Is4():
+		return types.Int(4)
+	case ip.Addr.Is6():
+		return types.Int(6)
+	default:
+		return types.NewErr("IP address %q is not an IPv4 or IPv6 address", ip.Addr.String())
+	}
+}
+
+func ipIsCanonical(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	addr, err := parseIPAddr(s)
+	if err != nil {
+		// Don't add context, we control the error message already.
+		return types.NewErr("%v", err)
+	}
+
+	// Addr.String() always returns the canonical form of the IP address.
+	// Therefore comparing this with the original string representation
+	// will tell us if the IP address is in its canonical form.
+	return types.Bool(addr.String() == s)
+}
+
+func isIP(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	_, err := parseIPAddr(s)
+	return types.Bool(err == nil)
+}
+
+func isUnspecified(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsUnspecified())
+}
+
+func isLoopback(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsLoopback())
+}
+
+func isLinkLocalMulticast(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsLinkLocalMulticast())
+}
+
+func isLinkLocalUnicast(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsLinkLocalUnicast())
+}
+
+func isGlobalUnicast(arg ref.Val) ref.Val {
+	ip, ok := arg.(apiservercel.IP)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(ip.Addr.IsGlobalUnicast())
+}
+
+// parseIPAddr parses a string into an IP address.
+// We use this function to parse IP addresses in the CEL library
+// so that we can share the common logic of rejecting IP addresses
+// that contain zones or are IPv4-mapped IPv6 addresses.
+func parseIPAddr(raw string) (netip.Addr, error) {
+	addr, err := netip.ParseAddr(raw)
+	if err != nil {
+		return netip.Addr{}, fmt.Errorf("IP Address %q parse error during conversion from string: %v", raw, err)
+	}
+
+	if addr.Zone() != "" {
+		return netip.Addr{}, fmt.Errorf("IP address %q with zone value is not allowed", raw)
+	}
+
+	if addr.Is4In6() {
+		return netip.Addr{}, fmt.Errorf("IPv4-mapped IPv6 address %q is not allowed", raw)
+	}
+
+	return addr, nil
+}
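The IP helpers follow the same pattern; a hedged sketch (the expression and wiring are illustrative, not taken from this PR):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	env, err := cel.NewEnv(library.IP())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(
		`isIP('192.168.0.1') && ip('192.168.0.1').family() == 4 && !ip('fe80::1').isGlobalUnicast()`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true: fe80::1 is link-local, not global unicast
}
```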
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/lists.go b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go
new file mode 100644
index 00000000..327ec93d
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/lists.go
@@ -0,0 +1,316 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"fmt"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+	"github.com/google/cel-go/interpreter/functions"
+)
+
+// Lists provides a CEL function library extension of list utility functions.
+//
+// isSorted
+//
+// Returns true if the provided list of comparable elements is sorted, else returns false.
+//
+// <list<T>>.isSorted() <bool>, T must be a comparable type
+//
+// Examples:
+//
+// [1, 2, 3].isSorted() // return true
+// ['a', 'b', 'b', 'c'].isSorted() // return true
+// [2.0, 1.0].isSorted() // return false
+// [1].isSorted() // return true
+// [].isSorted() // return true
+//
+// sum
+//
+// Returns the sum of the elements of the provided list. Supports CEL number (int, uint, double) and duration types.
+//
+// <list<T>>.sum() <T>, T must be a numeric type or a duration
+//
+// Examples:
+//
+// [1, 3].sum() // returns 4
+// [1.0, 3.0].sum() // returns 4.0
+// ['1m', '1s'].sum() // returns '1m1s'
+// emptyIntList.sum() // returns 0
+// emptyDoubleList.sum() // returns 0.0
+// [].sum() // returns 0
+//
+// min / max
+//
+// Returns the minimum/maximum valued element of the provided list. Supports all comparable types.
+// If the list is empty, an error is returned.
+//
+// <list<T>>.min() <T>, T must be a comparable type
+// <list<T>>.max() <T>, T must be a comparable type
+//
+// Examples:
+//
+// [1, 3].min() // returns 1
+// [1, 3].max() // returns 3
+// [].min() // error
+// [1].min() // returns 1
+// ([0] + emptyList).min() // returns 0
+//
+// indexOf / lastIndexOf
+//
+// Returns either the first or last positional index of the provided element in the list.
+// If the element is not found, -1 is returned. Supports all equatable types.
+//
+// <list<T>>.indexOf(<T>) <int>, T must be an equatable type
+// <list<T>>.lastIndexOf(<T>) <int>, T must be an equatable type
+//
+// Examples:
+//
+// [1, 2, 2, 3].indexOf(2) // returns 1
+// ['a', 'b', 'b', 'c'].lastIndexOf('b') // returns 2
+// [1.0].indexOf(1.1) // returns -1
+// [].indexOf('string') // returns -1
+func Lists() cel.EnvOption {
+	return cel.Lib(listsLib)
+}
+
+var listsLib = &lists{}
+
+type lists struct{}
+
+func (*lists) LibraryName() string {
+	return "k8s.lists"
+}
+
+var paramA = cel.TypeParamType("A")
+
+// CEL typeParams can be used to constrain an overload to a specific trait (e.g. traits.ComparableType) if the 1st operand is the type to constrain.
+// But the functions we need to constrain are <list<paramType>>, not just <paramType>.
+// Make sure the order of overload set is deterministic
+type namedCELType struct {
+	typeName string
+	celType  *cel.Type
+}
+
+var summableTypes = []namedCELType{
+	{typeName: "int", celType: cel.IntType},
+	{typeName: "uint", celType: cel.UintType},
+	{typeName: "double", celType: cel.DoubleType},
+	{typeName: "duration", celType: cel.DurationType},
+}
+
+var zeroValuesOfSummableTypes = map[string]ref.Val{
+	"int":      types.Int(0),
+	"uint":     types.Uint(0),
+	"double":   types.Double(0.0),
+	"duration": types.Duration{Duration: 0},
+}
+var comparableTypes = []namedCELType{
+	{typeName: "int", celType: cel.IntType},
+	{typeName: "uint", celType: cel.UintType},
+	{typeName: "double", celType: cel.DoubleType},
+	{typeName: "bool", celType: cel.BoolType},
+	{typeName: "duration", celType: cel.DurationType},
+	{typeName: "timestamp", celType: cel.TimestampType},
+	{typeName: "string", celType: cel.StringType},
+	{typeName: "bytes", celType: cel.BytesType},
+}
+
+// WARNING: All library additions or modifications must follow
+// https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/2876-crd-validation-expression-language#function-library-updates
+var listsLibraryDecls = map[string][]cel.FunctionOpt{
+	"isSorted": templatedOverloads(comparableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_is_sorted_bool", name),
+			[]*cel.Type{cel.ListType(paramType)}, cel.BoolType, cel.UnaryBinding(isSorted))
+	}),
+	"sum": templatedOverloads(summableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_sum_%s", name, name),
+			[]*cel.Type{cel.ListType(paramType)}, paramType, cel.UnaryBinding(func(list ref.Val) ref.Val {
+				return sum(
+					func() ref.Val {
+						return zeroValuesOfSummableTypes[name]
+					})(list)
+			}))
+	}),
+	"max": templatedOverloads(comparableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_max_%s", name, name),
+			[]*cel.Type{cel.ListType(paramType)}, paramType, cel.UnaryBinding(max()))
+	}),
+	"min": templatedOverloads(comparableTypes, func(name string, paramType *cel.Type) cel.FunctionOpt {
+		return cel.MemberOverload(fmt.Sprintf("list_%s_min_%s", name, name),
+			[]*cel.Type{cel.ListType(paramType)}, paramType, cel.UnaryBinding(min()))
+	}),
+	"indexOf": {
+		cel.MemberOverload("list_a_index_of_int", []*cel.Type{cel.ListType(paramA), paramA}, cel.IntType,
+			cel.BinaryBinding(indexOf)),
+	},
+	"lastIndexOf": {
+		cel.MemberOverload("list_a_last_index_of_int", []*cel.Type{cel.ListType(paramA), paramA}, cel.IntType,
+			cel.BinaryBinding(lastIndexOf)),
+	},
+}
+
+func (*lists) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{}
+	for name, overloads := range listsLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*lists) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func isSorted(val ref.Val) ref.Val {
+	var prev traits.Comparer
+	iterable, ok := val.(traits.Iterable)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(val)
+	}
+	for it := iterable.Iterator(); it.HasNext() == types.True; {
+		next := it.Next()
+		nextCmp, ok := next.(traits.Comparer)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(next)
+		}
+		if prev != nil {
+			cmp := prev.Compare(next)
+			if cmp == types.IntOne {
+				return types.False
+			}
+		}
+		prev = nextCmp
+	}
+	return types.True
+}
+
+func sum(init func() ref.Val) functions.UnaryOp {
+	return func(val ref.Val) ref.Val {
+		i := init()
+		acc, ok := i.(traits.Adder)
+		if !ok {
+			// Should never happen since all passed in init values are valid
+			return types.MaybeNoSuchOverloadErr(i)
+		}
+		iterable, ok := val.(traits.Iterable)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(val)
+		}
+		for it := iterable.Iterator(); it.HasNext() == types.True; {
+			next := it.Next()
+			nextAdder, ok := next.(traits.Adder)
+			if !ok {
+				// Should never happen for type checked CEL programs
+				return types.MaybeNoSuchOverloadErr(next)
+			}
+			if acc != nil {
+				s := acc.Add(next)
+				sum, ok := s.(traits.Adder)
+				if !ok {
+					// Should never happen for type checked CEL programs
+					return types.MaybeNoSuchOverloadErr(s)
+				}
+				acc = sum
+			} else {
+				acc = nextAdder
+			}
+		}
+		return acc.(ref.Val)
+	}
+}
+
+func min() functions.UnaryOp {
+	return cmp("min", types.IntOne)
+}
+
+func max() functions.UnaryOp {
+	return cmp("max", types.IntNegOne)
+}
+
+func cmp(opName string, opPreferCmpResult ref.Val) functions.UnaryOp {
+	return func(val ref.Val) ref.Val {
+		var result traits.Comparer
+		iterable, ok := val.(traits.Iterable)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(val)
+		}
+		for it := iterable.Iterator(); it.HasNext() == types.True; {
+			next := it.Next()
+			nextCmp, ok := next.(traits.Comparer)
+			if !ok {
+				// Should never happen for type checked CEL programs
+				return types.MaybeNoSuchOverloadErr(next)
+			}
+			if result == nil {
+				result = nextCmp
+			} else {
+				cmp := result.Compare(next)
+				if cmp == opPreferCmpResult {
+					result = nextCmp
+				}
+			}
+		}
+		if result == nil {
+			return types.NewErr("%s called on empty list", opName)
+		}
+		return result.(ref.Val)
+	}
+}
+
+func indexOf(list ref.Val, item ref.Val) ref.Val {
+	lister, ok := list.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(list)
+	}
+	sz := lister.Size().(types.Int)
+	for i := types.Int(0); i < sz; i++ {
+		if lister.Get(types.Int(i)).Equal(item) == types.True {
+			return types.Int(i)
+		}
+	}
+	return types.Int(-1)
+}
+
+func lastIndexOf(list ref.Val, item ref.Val) ref.Val {
+	lister, ok := list.(traits.Lister)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(list)
+	}
+	sz := lister.Size().(types.Int)
+	for i := sz - 1; i >= 0; i-- {
+		if lister.Get(types.Int(i)).Equal(item) == types.True {
+			return types.Int(i)
+		}
+	}
+	return types.Int(-1)
+}
+
+// templatedOverloads returns overloads for each of the provided types. The template function is called with each type
+// name (map key) and type to construct the overloads.
+func templatedOverloads(types []namedCELType, template func(name string, t *cel.Type) cel.FunctionOpt) []cel.FunctionOpt {
+	overloads := make([]cel.FunctionOpt, len(types))
+	i := 0
+	for _, t := range types {
+		overloads[i] = template(t.typeName, t.celType)
+		i++
+	}
+	return overloads
+}
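The list extensions become member functions on CEL lists and compose with declared variables. An illustrative sketch, assuming a hypothetical `ports` variable is bound at evaluation time:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	env, err := cel.NewEnv(
		library.Lists(),
		cel.Variable("ports", cel.ListType(cel.IntType)),
	)
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(`ports.isSorted() && ports.sum() < 1024 && ports.indexOf(443) == 1`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]interface{}{"ports": []int{80, 443}})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```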
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
new file mode 100644
index 00000000..b4ac91c8
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/quantity.go
@@ -0,0 +1,380 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"errors"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	"k8s.io/apimachinery/pkg/api/resource"
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// Quantity provides a CEL function library extension of Kubernetes
+// resource.Quantity parsing functions. See `resource.Quantity`
+// documentation for more detailed information about the format itself:
+// https://pkg.go.dev/k8s.io/apimachinery/pkg/api/resource#Quantity
+//
+// quantity
+//
+// Converts a string to a Quantity or results in an error if the string is not a valid Quantity. Refer
+// to resource.Quantity documentation for information on accepted patterns.
+//
+// quantity(<string>) <Quantity>
+//
+// Examples:
+//
+// quantity('1.5G') // returns a Quantity
+// quantity('200k') // returns a Quantity
+// quantity('200K') // error
+// quantity('Three') // error
+// quantity('Mi') // error
+//
+// isQuantity
+//
+// Returns true if a string is a valid Quantity. isQuantity returns true if and
+// only if quantity does not result in error.
+//
+// isQuantity(<string>) <bool>
+//
+// Examples:
+//
+// isQuantity('1.3G') // returns true
+// isQuantity('1.3Gi') // returns true
+// isQuantity('1,3G') // returns false
+// isQuantity('10000k') // returns true
+// isQuantity('200K') // returns false
+// isQuantity('Three') // returns false
+// isQuantity('Mi') // returns false
+//
+// Conversion to Scalars:
+//
+// - isInteger: returns true if and only if asInteger is safe to call without an error
+//
+// - asInteger: returns a representation of the current value as an int64 if
+//   possible or results in an error if conversion would result in overflow
+//   or loss of precision.
+//
+// - asApproximateFloat: returns a float64 representation of the quantity which may
+//   lose precision. If the value of the quantity is outside the range of a float64
+//   +Inf/-Inf will be returned.
+//
+// <Quantity>.isInteger() <bool>
+// <Quantity>.asInteger() <int>
+// <Quantity>.asApproximateFloat() <float>
+//
+// Examples:
+//
+// quantity("50000000G").isInteger() // returns true
+// quantity("50k").isInteger() // returns true
+// quantity("9999999999999999999999999999999999999G").asInteger() // error: cannot convert value to integer
+// quantity("9999999999999999999999999999999999999G").isInteger() // returns false
+// quantity("50k").asInteger() == 50000 // returns true
+// quantity("50k").sub(20000).asApproximateFloat() == 30000 // returns true
+//
+// Arithmetic
+//
+// - sign: Returns `1` if the quantity is positive, `-1` if it is negative, or `0` if it is zero
+//
+// - add: Returns sum of two quantities or a quantity and an integer
+//
+// - sub: Returns difference between two quantities or a quantity and an integer
+//
+// <Quantity>.sign() <int>
+// <Quantity>.add(<Quantity>) <Quantity>
+// <Quantity>.add(<int>) <Quantity>
+// <Quantity>.sub(<Quantity>) <Quantity>
+// <Quantity>.sub(<int>) <Quantity>
+//
+// Examples:
+//
+// quantity("50k").add(quantity("20k")) == quantity("70k") // returns true
+// quantity("50k").add(20) == quantity("50020") // returns true
+// quantity("50k").sub(quantity("20k")) == quantity("30k") // returns true
+// quantity("50k").sub(20000) == quantity("30k") // returns true
+// quantity("50k").add(20).sub(quantity("100k")).sub(-50000) == quantity("20") // returns true
+//
+// Comparisons
+//
+// - isGreaterThan: Returns true if and only if the receiver is greater than the operand
+//
+// - isLessThan: Returns true if and only if the receiver is less than the operand
+//
+// - compareTo: Compares receiver to operand and returns 0 if they are equal, 1 if the receiver is greater, or -1 if the receiver is less than the operand
+//
+// <Quantity>.isLessThan(<Quantity>) <bool>
+// <Quantity>.isGreaterThan(<Quantity>) <bool>
+// <Quantity>.compareTo(<Quantity>) <int>
+//
+// Examples:
+//
+// quantity("200M").compareTo(quantity("0.2G")) // returns 0
+// quantity("50M").compareTo(quantity("50Mi")) // returns -1
+// quantity("50Mi").compareTo(quantity("50M")) // returns 1
+// quantity("150Mi").isGreaterThan(quantity("100Mi")) // returns true
+// quantity("50Mi").isGreaterThan(quantity("100Mi")) // returns false
+// quantity("50M").isLessThan(quantity("100M")) // returns true
+// quantity("100M").isLessThan(quantity("50M")) // returns false
+
+func Quantity() cel.EnvOption {
+	return cel.Lib(quantityLib)
+}
+
+var quantityLib = &quantity{}
+
+type quantity struct{}
+
+func (*quantity) LibraryName() string {
+	return "k8s.quantity"
+}
+
+var quantityLibraryDecls = map[string][]cel.FunctionOpt{
+	"quantity": {
+		cel.Overload("string_to_quantity", []*cel.Type{cel.StringType}, apiservercel.QuantityType, cel.UnaryBinding(stringToQuantity)),
+	},
+	"isQuantity": {
+		cel.Overload("is_quantity_string", []*cel.Type{cel.StringType}, cel.BoolType, cel.UnaryBinding(isQuantity)),
+	},
+	"sign": {
+		cel.Overload("quantity_sign", []*cel.Type{apiservercel.QuantityType}, cel.IntType, cel.UnaryBinding(quantityGetSign)),
+	},
+	"isGreaterThan": {
+		cel.MemberOverload("quantity_is_greater_than", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, cel.BoolType, cel.BinaryBinding(quantityIsGreaterThan)),
+	},
+	"isLessThan": {
+		cel.MemberOverload("quantity_is_less_than", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, cel.BoolType, cel.BinaryBinding(quantityIsLessThan)),
+	},
+	"compareTo": {
+		cel.MemberOverload("quantity_compare_to", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, cel.IntType, cel.BinaryBinding(quantityCompareTo)),
+	},
+	"asApproximateFloat": {
+		cel.MemberOverload("quantity_get_float", []*cel.Type{apiservercel.QuantityType}, cel.DoubleType, cel.UnaryBinding(quantityGetApproximateFloat)),
+	},
+	"asInteger": {
+		cel.MemberOverload("quantity_get_int", []*cel.Type{apiservercel.QuantityType}, cel.IntType, cel.UnaryBinding(quantityGetValue)),
+	},
+	"isInteger": {
+		cel.MemberOverload("quantity_is_integer", []*cel.Type{apiservercel.QuantityType}, cel.BoolType, cel.UnaryBinding(quantityCanValue)),
+	},
+	"add": {
+		cel.MemberOverload("quantity_add", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, apiservercel.QuantityType, cel.BinaryBinding(quantityAdd)),
+		cel.MemberOverload("quantity_add_int", []*cel.Type{apiservercel.QuantityType, cel.IntType}, apiservercel.QuantityType, cel.BinaryBinding(quantityAddInt)),
+	},
+	"sub": {
+		cel.MemberOverload("quantity_sub", []*cel.Type{apiservercel.QuantityType, apiservercel.QuantityType}, apiservercel.QuantityType, cel.BinaryBinding(quantitySub)),
+		cel.MemberOverload("quantity_sub_int", []*cel.Type{apiservercel.QuantityType, cel.IntType}, apiservercel.QuantityType, cel.BinaryBinding(quantitySubInt)),
+	},
+}
+
+func (*quantity) CompileOptions() []cel.EnvOption {
+	options := make([]cel.EnvOption, 0, len(quantityLibraryDecls))
+	for name, overloads := range quantityLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*quantity) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func isQuantity(arg ref.Val) ref.Val {
+	str, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	_, err := resource.ParseQuantity(str)
+	if err != nil {
+		return types.Bool(false)
+	}
+
+	return types.Bool(true)
+}
+
+func stringToQuantity(arg ref.Val) ref.Val {
+	str, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q, err := resource.ParseQuantity(str)
+	if err != nil {
+		return types.WrapErr(err)
+	}
+
+	return apiservercel.Quantity{Quantity: &q}
+}
+
+func quantityGetApproximateFloat(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.Double(q.AsApproximateFloat64())
+}
+
+func quantityCanValue(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	_, success := q.AsInt64()
+	return types.Bool(success)
+}
+
+func quantityGetValue(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	v, success := q.AsInt64()
+	if !success {
+		return types.WrapErr(errors.New("cannot convert value to integer"))
+	}
+	return types.Int(v)
+}
+
+func quantityGetSign(arg ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.Int(q.Sign())
+}
+
+func quantityIsGreaterThan(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(q.Cmp(*q2) == 1)
+}
+
+func quantityIsLessThan(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Bool(q.Cmp(*q2) == -1)
+}
+
+func quantityCompareTo(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	return types.Int(q.Cmp(*q2))
+}
+
+func quantityAdd(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	copy := *q
+	copy.Add(*q2)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
+
+func quantityAddInt(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(int64)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2Converted := *resource.NewQuantity(q2, resource.DecimalExponent)
+
+	copy := *q
+	copy.Add(q2Converted)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
+
+func quantitySub(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	copy := *q
+	copy.Sub(*q2)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
+
+func quantitySubInt(arg ref.Val, other ref.Val) ref.Val {
+	q, ok := arg.Value().(*resource.Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2, ok := other.Value().(int64)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	q2Converted := *resource.NewQuantity(q2, resource.DecimalExponent)
+
+	copy := *q
+	copy.Sub(q2Converted)
+	return &apiservercel.Quantity{
+		Quantity: &copy,
+	}
+}
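For the quantity library, comparisons are by numeric value rather than by notation, which the sketch below demonstrates (illustrative only, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	env, err := cel.NewEnv(library.Quantity())
	if err != nil {
		panic(err)
	}
	// 1.5Gi (binary suffix) is larger than 1500M (decimal suffix),
	// and "1" equals "1000m" by value.
	ast, iss := env.Compile(
		`quantity("1.5Gi").isGreaterThan(quantity("1500M")) && quantity("1").compareTo(quantity("1000m")) == 0`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```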
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/regex.go b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go
new file mode 100644
index 00000000..147a40f9
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/regex.go
@@ -0,0 +1,193 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"regexp"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/interpreter"
+)
+
+// Regex provides a CEL function library extension of regex utility functions.
+//
+// find / findAll
+//
+// Returns substrings that match the provided regular expression. find returns the first match. findAll may optionally
+// be provided a limit. If the limit is set and >= 0, no more than the limit number of matches are returned.
+//
+// <string>.find(<string>) <string>
+// <string>.findAll(<string>) <list<string>>
+// <string>.findAll(<string>, <int>) <list<string>>
+//
+// Examples:
+//
+// "abc 123".find('[0-9]*') // returns '123'
+// "abc 123".find('xyz') // returns ''
+// "123 abc 456".findAll('[0-9]*') // returns ['123', '456']
+// "123 abc 456".findAll('[0-9]*', 1) // returns ['123']
+// "123 abc 456".findAll('xyz') // returns []
+func Regex() cel.EnvOption {
+	return cel.Lib(regexLib)
+}
+
+var regexLib = &regex{}
+
+type regex struct{}
+
+func (*regex) LibraryName() string {
+	return "k8s.regex"
+}
+
+var regexLibraryDecls = map[string][]cel.FunctionOpt{
+	"find": {
+		cel.MemberOverload("string_find_string", []*cel.Type{cel.StringType, cel.StringType}, cel.StringType,
+			cel.BinaryBinding(find))},
+	"findAll": {
+		cel.MemberOverload("string_find_all_string", []*cel.Type{cel.StringType, cel.StringType},
+			cel.ListType(cel.StringType),
+			cel.BinaryBinding(func(str, regex ref.Val) ref.Val {
+				return findAll(str, regex, types.Int(-1))
+			})),
+		cel.MemberOverload("string_find_all_string_int",
+			[]*cel.Type{cel.StringType, cel.StringType, cel.IntType},
+			cel.ListType(cel.StringType),
+			cel.FunctionBinding(findAll)),
+	},
+}
+
+func (*regex) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{}
+	for name, overloads := range regexLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*regex) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{
+		cel.OptimizeRegex(FindRegexOptimization, FindAllRegexOptimization),
+	}
+}
+
+func find(strVal ref.Val, regexVal ref.Val) ref.Val {
+	str, ok := strVal.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(strVal)
+	}
+	regex, ok := regexVal.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(regexVal)
+	}
+	re, err := regexp.Compile(regex)
+	if err != nil {
+		return types.NewErr("Illegal regex: %v", err.Error())
+	}
+	result := re.FindString(str)
+	return types.String(result)
+}
+
+func findAll(args ...ref.Val) ref.Val {
+	argn := len(args)
+	if argn < 2 || argn > 3 {
+		return types.NoSuchOverloadErr()
+	}
+	str, ok := args[0].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[0])
+	}
+	regex, ok := args[1].Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(args[1])
+	}
+	n := int64(-1)
+	if argn == 3 {
+		n, ok = args[2].Value().(int64)
+		if !ok {
+			return types.MaybeNoSuchOverloadErr(args[2])
+		}
+	}
+
+	re, err := regexp.Compile(regex)
+	if err != nil {
+		return types.NewErr("Illegal regex: %v", err.Error())
+	}
+
+	result := re.FindAllString(str, int(n))
+
+	return types.NewStringList(types.DefaultTypeAdapter, result)
+}
+
+// FindRegexOptimization optimizes the 'find' function by compiling the regex pattern and
+// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function
+// call invocations.
+var FindRegexOptimization = &interpreter.RegexOptimization{
+	Function:   "find",
+	RegexIndex: 1,
+	Factory: func(call interpreter.InterpretableCall, regexPattern string) (interpreter.InterpretableCall, error) {
+		compiledRegex, err := regexp.Compile(regexPattern)
+		if err != nil {
+			return nil, err
+		}
+		return interpreter.NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(args ...ref.Val) ref.Val {
+			if len(args) != 2 {
+				return types.NoSuchOverloadErr()
+			}
+			in, ok := args[0].Value().(string)
+			if !ok {
+				return types.MaybeNoSuchOverloadErr(args[0])
+			}
+			return types.String(compiledRegex.FindString(in))
+		}), nil
+	},
+}
+
+// FindAllRegexOptimization optimizes the 'findAll' function by compiling the regex pattern and
+// reporting any compilation errors at program creation time, and using the compiled regex pattern for all function
+// call invocations.
+var FindAllRegexOptimization = &interpreter.RegexOptimization{
+	Function:   "findAll",
+	RegexIndex: 1,
+	Factory: func(call interpreter.InterpretableCall, regexPattern string) (interpreter.InterpretableCall, error) {
+		compiledRegex, err := regexp.Compile(regexPattern)
+		if err != nil {
+			return nil, err
+		}
+		return interpreter.NewCall(call.ID(), call.Function(), call.OverloadID(), call.Args(), func(args ...ref.Val) ref.Val {
+			argn := len(args)
+			if argn < 2 || argn > 3 {
+				return types.NoSuchOverloadErr()
+			}
+			str, ok := args[0].Value().(string)
+			if !ok {
+				return types.MaybeNoSuchOverloadErr(args[0])
+			}
+			n := int64(-1)
+			if argn == 3 {
+				n, ok = args[2].Value().(int64)
+				if !ok {
+					return types.MaybeNoSuchOverloadErr(args[2])
+				}
+			}
+
+			result := compiledRegex.FindAllString(str, int(n))
+			return types.NewStringList(types.DefaultTypeAdapter, result)
+		}), nil
+	},
+}
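A short illustrative sketch for the regex helpers; because the library's ProgramOptions include cel.OptimizeRegex, literal patterns are compiled once at program construction rather than on every evaluation:

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
	"k8s.io/apiserver/pkg/cel/library"
)

func main() {
	env, err := cel.NewEnv(library.Regex())
	if err != nil {
		panic(err)
	}
	ast, iss := env.Compile(
		`"abc 123 def 456".findAll('[0-9]+', 1) == ['123'] && "abc".find('[0-9]+') == ''`)
	if iss.Err() != nil {
		panic(iss.Err())
	}
	// Program() applies the library's regex optimizations here.
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(cel.NoVars())
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```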
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/test.go b/vendor/k8s.io/apiserver/pkg/cel/library/test.go
new file mode 100644
index 00000000..dcbc058a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/test.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"math"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+)
+
+// Test provides a test() function that returns true.
+func Test(options ...TestOption) cel.EnvOption {
+	t := &testLib{version: math.MaxUint32}
+	for _, o := range options {
+		t = o(t)
+	}
+	return cel.Lib(t)
+}
+
+type testLib struct {
+	version uint32
+}
+
+func (*testLib) LibraryName() string {
+	return "k8s.test"
+}
+
+type TestOption func(*testLib) *testLib
+
+func TestVersion(version uint32) func(lib *testLib) *testLib {
+	return func(sl *testLib) *testLib {
+		sl.version = version
+		return sl
+	}
+}
+
+func (t *testLib) CompileOptions() []cel.EnvOption {
+	var options []cel.EnvOption
+
+	if t.version == 0 {
+		options = append(options, cel.Function("test",
+			cel.Overload("test", []*cel.Type{}, cel.BoolType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					return types.True
+				}))))
+	}
+
+	if t.version >= 1 {
+		options = append(options, cel.Function("test",
+			cel.Overload("test", []*cel.Type{}, cel.BoolType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					// Return false here so tests can observe which version of the function is registered
+					// Actual function libraries must not break backward compatibility
+					return types.False
+				}))))
+		options = append(options, cel.Function("testV1",
+			cel.Overload("testV1", []*cel.Type{}, cel.BoolType,
+				cel.FunctionBinding(func(args ...ref.Val) ref.Val {
+					return types.True
+				}))))
+	}
+	return options
+}
+
+func (*testLib) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/library/urls.go b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go
new file mode 100644
index 00000000..8f4ba85a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/library/urls.go
@@ -0,0 +1,240 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package library
+
+import (
+	"net/url"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+
+	apiservercel "k8s.io/apiserver/pkg/cel"
+)
+
+// URLs provides a CEL function library extension of URL parsing functions.
+//
+// url
+//
+// Converts a string to a URL or results in an error if the string is not a valid URL. The URL must be an absolute URI
+// or an absolute path.
+//
+// url(<string>) <URL>
+//
+// Examples:
+//
+// url('https://user:pass@example.com:80/path?query=val#fragment') // returns a URL
+// url('/absolute-path') // returns a URL
+// url('https://a:b:c/') // error
+// url('../relative-path') // error
+//
+// isURL
+//
+// Returns true if a string is a valid URL. The URL must be an absolute URI or an absolute path.
+//
+// isURL(<string>) <bool>
+//
+// Examples:
+//
+// isURL('https://user:pass@example.com:80/path?query=val#fragment') // returns true
+// isURL('/absolute-path') // returns true
+// isURL('https://a:b:c/') // returns false
+// isURL('../relative-path') // returns false
+//
+// getScheme / getHost / getHostname / getPort / getEscapedPath / getQuery
+//
+// Return the parsed components of a URL.
+//
+// - getScheme: If absent in the URL, returns an empty string.
+//
+// - getHostname: IPv6 addresses are returned without braces, e.g. "::1". If absent in the URL, returns an empty string.
+//
+// - getHost: IPv6 addresses are returned with braces, e.g. "[::1]". If absent in the URL, returns an empty string.
+//
+// - getEscapedPath: The string returned by getEscapedPath is URL escaped, e.g. "with space" becomes "with%20space".
+//   If absent in the URL, returns an empty string.
+//
+// - getPort: If absent in the URL, returns an empty string.
+//
+// - getQuery: Returns the query parameters in "matrix" form where a repeated query key is interpreted to
+//   mean that there are multiple values for that key. The keys and values are returned unescaped.
+//   If absent in the URL, returns an empty map.
+//
+// <URL>.getScheme() <string>
+// <URL>.getHost() <string>
+// <URL>.getHostname() <string>
+// <URL>.getPort() <string>
+// <URL>.getEscapedPath() <string>
+// <URL>.getQuery() <map<string, list<string>>>
+//
+// Examples:
+//
+// url('/path').getScheme() // returns ''
+// url('https://example.com/').getScheme() // returns 'https'
+// url('https://example.com:80/').getHost() // returns 'example.com:80'
+// url('https://example.com/').getHost() // returns 'example.com'
+// url('https://[::1]:80/').getHost() // returns '[::1]:80'
+// url('https://[::1]/').getHost() // returns '[::1]'
+// url('/path').getHost() // returns ''
+// url('https://example.com:80/').getHostname() // returns 'example.com'
+// url('https://127.0.0.1:80/').getHostname() // returns '127.0.0.1'
+// url('https://[::1]:80/').getHostname() // returns '::1'
+// url('/path').getHostname() // returns ''
+// url('https://example.com:80/').getPort() // returns '80'
+// url('https://example.com/').getPort() // returns ''
+// url('/path').getPort() // returns ''
+// url('https://example.com/path').getEscapedPath() // returns '/path'
+// url('https://example.com/path with spaces/').getEscapedPath() // returns '/path%20with%20spaces/'
+// url('https://example.com').getEscapedPath() // returns ''
+// url('https://example.com/path?k1=a&k2=b&k2=c').getQuery() // returns { 'k1': ['a'], 'k2': ['b', 'c']}
+// url('https://example.com/path?key with spaces=value with spaces').getQuery() // returns { 'key with spaces': ['value with spaces']}
+// url('https://example.com/path?').getQuery() // returns {}
+// url('https://example.com/path').getQuery() // returns {}
+func URLs() cel.EnvOption {
+	return cel.Lib(urlsLib)
+}
+
+var urlsLib = &urls{}
+
+type urls struct{}
+
+func (*urls) LibraryName() string {
+	return "k8s.urls"
+}
+
+var urlLibraryDecls = map[string][]cel.FunctionOpt{
+	"url": {
+		cel.Overload("string_to_url", []*cel.Type{cel.StringType}, apiservercel.URLType,
+			cel.UnaryBinding(stringToUrl))},
+	"getScheme": {
+		cel.MemberOverload("url_get_scheme", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getScheme))},
+	"getHost": {
+		cel.MemberOverload("url_get_host", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getHost))},
+	"getHostname": {
+		cel.MemberOverload("url_get_hostname", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getHostname))},
+	"getPort": {
+		cel.MemberOverload("url_get_port", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getPort))},
+	"getEscapedPath": {
+		cel.MemberOverload("url_get_escaped_path", []*cel.Type{apiservercel.URLType}, cel.StringType,
+			cel.UnaryBinding(getEscapedPath))},
+	"getQuery": {
+		cel.MemberOverload("url_get_query", []*cel.Type{apiservercel.URLType},
+			cel.MapType(cel.StringType, cel.ListType(cel.StringType)),
+			cel.UnaryBinding(getQuery))},
+	"isURL": {
+		cel.Overload("is_url_string", []*cel.Type{cel.StringType}, cel.BoolType,
+			cel.UnaryBinding(isURL))},
+}
+
+func (*urls) CompileOptions() []cel.EnvOption {
+	options := []cel.EnvOption{}
+	for name, overloads := range urlLibraryDecls {
+		options = append(options, cel.Function(name, overloads...))
+	}
+	return options
+}
+
+func (*urls) ProgramOptions() []cel.ProgramOption {
+	return []cel.ProgramOption{}
+}
+
+func stringToUrl(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	// Use ParseRequestURI to check the URL before conversion.
+	// ParseRequestURI requires absolute URLs and is used by the OpenAPIv3 'uri' format.
+	_, err := url.ParseRequestURI(s)
+	if err != nil {
+		return types.NewErr("URL parse error during conversion from string: %v", err)
+	}
+	// We must parse again with Parse since ParseRequestURI incorrectly parses URLs that contain a fragment
+	// part and will incorrectly append the fragment to either the path or the query, depending on which it was adjacent to.
+	u, err := url.Parse(s)
+	if err != nil {
+		// Errors are not expected here since Parse is a more lenient parser than ParseRequestURI.
+		return types.NewErr("URL parse error during conversion from string: %v", err)
+	}
+	return apiservercel.URL{URL: u}
+}
+
+func getScheme(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Scheme)
+}
+
+func getHost(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Host)
+}
+
+func getHostname(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Hostname())
+}
+
+func getPort(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.Port())
+}
+
+func getEscapedPath(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	return types.String(u.EscapedPath())
+}
+
+func getQuery(arg ref.Val) ref.Val {
+	u, ok := arg.Value().(*url.URL)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+
+	result := map[ref.Val]ref.Val{}
+	for k, v := range u.Query() {
+		result[types.String(k)] = types.NewStringList(types.DefaultTypeAdapter, v)
+	}
+	return types.NewRefValMap(types.DefaultTypeAdapter, result)
+}
+
+func isURL(arg ref.Val) ref.Val {
+	s, ok := arg.Value().(string)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(arg)
+	}
+	_, err := url.ParseRequestURI(s)
+	return types.Bool(err == nil)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/cel/limits.go b/vendor/k8s.io/apiserver/pkg/cel/limits.go
new file mode 100644
index 00000000..66ab4e44
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/limits.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import celconfig "k8s.io/apiserver/pkg/apis/cel"
+
+const (
+	// DefaultMaxRequestSizeBytes is the size of the largest request that will be accepted
+	DefaultMaxRequestSizeBytes = celconfig.MaxRequestSizeBytes
+
+	// MaxDurationSizeJSON
+	// OpenAPI duration strings follow RFC 3339, section 5.6 - see the comment on maxDatetimeSizeJSON
+	MaxDurationSizeJSON = 32
+	// MaxDatetimeSizeJSON
+	// OpenAPI datetime strings follow RFC 3339, section 5.6, and the longest possible
+	// such string is 9999-12-31T23:59:59.999999999Z, which has length 30 - we add 2
+	// to allow for quotation marks
+	MaxDatetimeSizeJSON = 32
+	// MinDurationSizeJSON
+	// Golang allows a string of 0 to be parsed as a duration, so that plus 2 to account for
+	// quotation marks makes 3
+	MinDurationSizeJSON = 3
+	// JSONDateSize is the size of a date serialized as part of a JSON object
+	// RFC 3339 dates require YYYY-MM-DD, and then we add 2 to allow for quotation marks
+	JSONDateSize = 12
+	// MinDatetimeSizeJSON is the minimal length of a datetime formatted as RFC 3339
+	// RFC 3339 datetimes require a full date (YYYY-MM-DD) and full time (HH:MM:SS), and we add 3 for
+	// quotation marks like always in addition to the capital T that separates the date and time
+	MinDatetimeSizeJSON = 21
+	// MinStringSize is the size of literal ""
+	MinStringSize = 2
+	// MinBoolSize is the length of literal true
+	MinBoolSize = 4
+	// MinNumberSize is the length of literal 0
+	MinNumberSize = 1
+
+	MaxNameFormatRegexSize = 128
+)
diff --git a/vendor/k8s.io/apiserver/pkg/cel/quantity.go b/vendor/k8s.io/apiserver/pkg/cel/quantity.go
new file mode 100644
index 00000000..ce823964
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/cel/quantity.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/google/cel-go/cel"
+	"github.com/google/cel-go/checker/decls"
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"k8s.io/apimachinery/pkg/api/resource"
+)
+
+var (
+	QuantityObject    = decls.NewObjectType("kubernetes.Quantity")
+	quantityTypeValue = types.NewTypeValue("kubernetes.Quantity")
+	QuantityType      = cel.ObjectType("kubernetes.Quantity")
+)
+
+// Quantity provides a CEL representation of a resource.Quantity
+type Quantity struct {
+	*resource.Quantity
+}
+
+func (d Quantity) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	if reflect.TypeOf(d.Quantity).AssignableTo(typeDesc) {
+		return d.Quantity, nil
+	}
+	if reflect.TypeOf("").AssignableTo(typeDesc) {
+		return d.Quantity.String(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from 'Quantity' to '%v'", typeDesc)
+}
+
+func (d Quantity) ConvertToType(typeVal ref.Type) ref.Val {
+	switch typeVal {
+	case quantityTypeValue:
+		return d
+	case types.TypeType:
+		return quantityTypeValue
+	default:
+		return types.NewErr("type conversion error from '%s' to '%s'", quantityTypeValue, typeVal)
+	}
+}
+
+func (d Quantity) Equal(other ref.Val) ref.Val {
+	otherDur, ok := other.(Quantity)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	return types.Bool(d.Quantity.Equal(*otherDur.Quantity))
+}
+
+func (d Quantity) Type() ref.Type {
+	return quantityTypeValue
+}
+
+func (d Quantity) Value() interface{} {
+	return d.Quantity
+}
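The Quantity CEL value above delegates equality to resource.Quantity, which compares by numeric value rather than by serialized notation. A small illustrative sketch, assuming the vendored package is importable as shown:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	apiservercel "k8s.io/apiserver/pkg/cel"
)

func main() {
	oneGi := resource.MustParse("1Gi")
	sameGi := resource.MustParse("1024Mi")
	a := apiservercel.Quantity{Quantity: &oneGi}
	b := apiservercel.Quantity{Quantity: &sameGi}
	// 1Gi and 1024Mi have the same value, so Equal reports true
	// even though the two strings differ.
	fmt.Println(a.Equal(b)) // true
}
```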
+func NewMapType(key, elem *DeclType, maxProperties int64) *DeclType { + return &DeclType{ + name: "map", + KeyType: key, + ElemType: elem, + MaxElements: maxProperties, + celType: cel.MapType(key.CelType(), elem.CelType()), + defaultValue: NewMapValue(), + // a map can always be represented as {} in JSON, so hardcode the min size + // to 2 + MinSerializedSize: 2, + } +} + +// NewObjectType creates an object type with a qualified name and a set of field declarations. +func NewObjectType(name string, fields map[string]*DeclField) *DeclType { + t := &DeclType{ + name: name, + Fields: fields, + celType: cel.ObjectType(name), + traitMask: traits.FieldTesterType | traits.IndexerType, + // an object could potentially be larger than the min size we default to here ({}), + // but we rely upon the caller to change MinSerializedSize accordingly if they add + // properties to the object + MinSerializedSize: 2, + } + t.defaultValue = NewObjectValue(t) + return t +} + +func NewSimpleTypeWithMinSize(name string, celType *cel.Type, zeroVal ref.Val, minSize int64) *DeclType { + return &DeclType{ + name: name, + celType: celType, + defaultValue: zeroVal, + MinSerializedSize: minSize, + } +} + +// DeclType represents the universal type descriptor for OpenAPIv3 types. +type DeclType struct { + fmt.Stringer + + name string + // Fields contains a map of escaped CEL identifier field names to field declarations. + Fields map[string]*DeclField + KeyType *DeclType + ElemType *DeclType + TypeParam bool + Metadata map[string]string + MaxElements int64 + // MinSerializedSize represents the smallest possible size in bytes that + // the DeclType could be serialized to in JSON. + MinSerializedSize int64 + + celType *cel.Type + traitMask int + defaultValue ref.Val +} + +// MaybeAssignTypeName attempts to set the DeclType name to a fully qualified name, if the type +// is of `object` type. +// +// The DeclType must return true for `IsObject` or this assignment will error. +func (t *DeclType) MaybeAssignTypeName(name string) *DeclType { + if t.IsObject() { + objUpdated := false + if t.name != "object" { + name = t.name + } else { + objUpdated = true + } + fieldMap := make(map[string]*DeclField, len(t.Fields)) + for fieldName, field := range t.Fields { + fieldType := field.Type + fieldTypeName := fmt.Sprintf("%s.%s", name, fieldName) + updated := fieldType.MaybeAssignTypeName(fieldTypeName) + if updated == fieldType { + fieldMap[fieldName] = field + continue + } + objUpdated = true + fieldMap[fieldName] = &DeclField{ + Name: fieldName, + Type: updated, + Required: field.Required, + enumValues: field.enumValues, + defaultValue: field.defaultValue, + } + } + if !objUpdated { + return t + } + return &DeclType{ + name: name, + Fields: fieldMap, + KeyType: t.KeyType, + ElemType: t.ElemType, + TypeParam: t.TypeParam, + Metadata: t.Metadata, + celType: cel.ObjectType(name), + traitMask: t.traitMask, + defaultValue: t.defaultValue, + MinSerializedSize: t.MinSerializedSize, + } + } + if t.IsMap() { + elemTypeName := fmt.Sprintf("%s.@elem", name) + updated := t.ElemType.MaybeAssignTypeName(elemTypeName) + if updated == t.ElemType { + return t + } + return NewMapType(t.KeyType, updated, t.MaxElements) + } + if t.IsList() { + elemTypeName := fmt.Sprintf("%s.@idx", name) + updated := t.ElemType.MaybeAssignTypeName(elemTypeName) + if updated == t.ElemType { + return t + } + return NewListType(updated, t.MaxElements) + } + return t +} + +// ExprType returns the CEL expression type of this declaration. 
+func (t *DeclType) ExprType() (*exprpb.Type, error) {
+	return cel.TypeToExprType(t.celType)
+}
+
+// CelType returns the CEL type of this declaration.
+func (t *DeclType) CelType() *cel.Type {
+	return t.celType
+}
+
+// FindField returns the DeclField with the given name if present.
+func (t *DeclType) FindField(name string) (*DeclField, bool) {
+	f, found := t.Fields[name]
+	return f, found
+}
+
+// HasTrait implements the CEL ref.Type interface making this type declaration suitable for use
+// within the CEL evaluator.
+func (t *DeclType) HasTrait(trait int) bool {
+	if t.traitMask&trait == trait {
+		return true
+	}
+	if t.defaultValue == nil {
+		return false
+	}
+	_, isDecl := t.defaultValue.Type().(*DeclType)
+	if isDecl {
+		return false
+	}
+	return t.defaultValue.Type().HasTrait(trait)
+}
+
+// IsList returns whether the declaration is a `list` type which defines a parameterized element
+// type, but not a parameterized key type or fields.
+func (t *DeclType) IsList() bool {
+	return t.KeyType == nil && t.ElemType != nil && t.Fields == nil
+}
+
+// IsMap returns whether the declaration is a 'map' type which defines parameterized key and
+// element types, but not fields.
+func (t *DeclType) IsMap() bool {
+	return t.KeyType != nil && t.ElemType != nil && t.Fields == nil
+}
+
+// IsObject returns whether the declaration is an 'object' type which defines a set of typed fields.
+func (t *DeclType) IsObject() bool {
+	return t.KeyType == nil && t.ElemType == nil && t.Fields != nil
+}
+
+// String implements the fmt.Stringer interface method.
+func (t *DeclType) String() string {
+	return t.name
+}
+
+// TypeName returns the fully qualified type name for the DeclType.
+func (t *DeclType) TypeName() string {
+	return t.name
+}
+
+// DefaultValue returns the CEL ref.Val representing the default value for this object type,
+// if one exists.
+func (t *DeclType) DefaultValue() ref.Val {
+	return t.defaultValue
+}
+
+// FieldTypeMap constructs a map of the field and object types nested within a given type.
+func FieldTypeMap(path string, t *DeclType) map[string]*DeclType {
+	if t.IsObject() && t.TypeName() != "object" {
+		path = t.TypeName()
+	}
+	types := make(map[string]*DeclType)
+	buildDeclTypes(path, t, types)
+	return types
+}
+
+func buildDeclTypes(path string, t *DeclType, types map[string]*DeclType) {
+	// Ensure object types are properly named according to where they appear in the schema.
+	if t.IsObject() {
+		// Hack to ensure that names are uniquely qualified and work well with the type
+		// resolution steps which require fully qualified type names for field resolution
+		// to function properly.
+		types[t.TypeName()] = t
+		for name, field := range t.Fields {
+			fieldPath := fmt.Sprintf("%s.%s", path, name)
+			buildDeclTypes(fieldPath, field.Type, types)
+		}
+	}
+	// Map element properties to type names if needed.
+	if t.IsMap() {
+		mapElemPath := fmt.Sprintf("%s.@elem", path)
+		buildDeclTypes(mapElemPath, t.ElemType, types)
+		types[path] = t
+	}
+	// List element properties.
+	if t.IsList() {
+		listIdxPath := fmt.Sprintf("%s.@idx", path)
+		buildDeclTypes(listIdxPath, t.ElemType, types)
+		types[path] = t
+	}
+}
+
+// DeclField describes the name, ordinal, and optionality of a field declaration within a type.
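The IsList/IsMap/IsObject predicates are purely structural (key type, element type, fields), and FieldTypeMap leans on them to flatten a schema into a registry keyed by qualified type names, with "@elem" and "@idx" marking map and list elements. A sketch under the same illustrative alias:

    spec := apiservercel.NewObjectType("apps.v1.Deployment.spec", map[string]*apiservercel.DeclField{
        "replicas": apiservercel.NewDeclField("replicas", apiservercel.IntType, true, nil, nil),
    })
    labels := apiservercel.NewMapType(apiservercel.StringType, apiservercel.StringType, 64)

    fmt.Println(spec.IsObject(), labels.IsMap(), labels.IsList()) // true true false

    // One entry per object/map/list type reachable from the root.
    for name := range apiservercel.FieldTypeMap("spec", spec) {
        fmt.Println(name) // apps.v1.Deployment.spec
    }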
+type DeclField struct { + Name string + Type *DeclType + Required bool + enumValues []interface{} + defaultValue interface{} +} + +func NewDeclField(name string, declType *DeclType, required bool, enumValues []interface{}, defaultValue interface{}) *DeclField { + return &DeclField{ + Name: name, + Type: declType, + Required: required, + enumValues: enumValues, + defaultValue: defaultValue, + } +} + +// TypeName returns the string type name of the field. +func (f *DeclField) TypeName() string { + return f.Type.TypeName() +} + +// DefaultValue returns the zero value associated with the field. +func (f *DeclField) DefaultValue() ref.Val { + if f.defaultValue != nil { + return types.DefaultTypeAdapter.NativeToValue(f.defaultValue) + } + return f.Type.DefaultValue() +} + +// EnumValues returns the set of values that this field may take. +func (f *DeclField) EnumValues() []ref.Val { + if f.enumValues == nil || len(f.enumValues) == 0 { + return []ref.Val{} + } + ev := make([]ref.Val, len(f.enumValues)) + for i, e := range f.enumValues { + ev[i] = types.DefaultTypeAdapter.NativeToValue(e) + } + return ev +} + +func allTypesForDecl(declTypes []*DeclType) map[string]*DeclType { + if declTypes == nil { + return nil + } + allTypes := map[string]*DeclType{} + for _, declType := range declTypes { + for k, t := range FieldTypeMap(declType.TypeName(), declType) { + allTypes[k] = t + } + } + + return allTypes +} + +// NewDeclTypeProvider returns an Open API Schema-based type-system which is CEL compatible. +func NewDeclTypeProvider(rootTypes ...*DeclType) *DeclTypeProvider { + // Note, if the schema indicates that it's actually based on another proto + // then prefer the proto definition. For expressions in the proto, a new field + // annotation will be needed to indicate the expected environment and type of + // the expression. + allTypes := allTypesForDecl(rootTypes) + return &DeclTypeProvider{ + registeredTypes: allTypes, + } +} + +// DeclTypeProvider extends the CEL ref.TypeProvider interface and provides an Open API Schema-based +// type-system. +type DeclTypeProvider struct { + registeredTypes map[string]*DeclType + typeProvider types.Provider + typeAdapter types.Adapter + recognizeKeywordAsFieldName bool +} + +func (rt *DeclTypeProvider) SetRecognizeKeywordAsFieldName(recognize bool) { + rt.recognizeKeywordAsFieldName = recognize +} + +func (rt *DeclTypeProvider) EnumValue(enumName string) ref.Val { + return rt.typeProvider.EnumValue(enumName) +} + +func (rt *DeclTypeProvider) FindIdent(identName string) (ref.Val, bool) { + return rt.typeProvider.FindIdent(identName) +} + +// EnvOptions returns a set of cel.EnvOption values which includes the declaration set +// as well as a custom ref.TypeProvider. +// +// If the DeclTypeProvider value is nil, an empty []cel.EnvOption set is returned. +func (rt *DeclTypeProvider) EnvOptions(tp types.Provider) ([]cel.EnvOption, error) { + if rt == nil { + return []cel.EnvOption{}, nil + } + rtWithTypes, err := rt.WithTypeProvider(tp) + if err != nil { + return nil, err + } + return []cel.EnvOption{ + cel.CustomTypeProvider(rtWithTypes), + cel.CustomTypeAdapter(rtWithTypes), + }, nil +} + +// WithTypeProvider returns a new DeclTypeProvider that sets the given TypeProvider +// If the original DeclTypeProvider is nil, the returned DeclTypeProvider is still nil. 
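NewDeclTypeProvider plus EnvOptions (above) is the intended wiring: the provider registers every type flattened out of the root declarations and then serves as both the custom type provider and the custom type adapter of a CEL environment. A sketch of the hookup, assuming cel-go's Env.CELTypeProvider accessor and the illustrative spec type from the previous sketch:

    provider := apiservercel.NewDeclTypeProvider(spec)

    base, err := cel.NewEnv()
    if err != nil {
        panic(err) // illustration only
    }
    opts, err := provider.EnvOptions(base.CELTypeProvider())
    if err != nil {
        panic(err)
    }
    env, err := base.Extend(opts...)
    // env can now check expressions against "apps.v1.Deployment.spec".
    _, _ = env, err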
+func (rt *DeclTypeProvider) WithTypeProvider(tp types.Provider) (*DeclTypeProvider, error) {
+	if rt == nil {
+		return nil, nil
+	}
+	var ta types.Adapter = types.DefaultTypeAdapter
+	tpa, ok := tp.(types.Adapter)
+	if ok {
+		ta = tpa
+	}
+	rtWithTypes := &DeclTypeProvider{
+		typeProvider:                tp,
+		typeAdapter:                 ta,
+		registeredTypes:             rt.registeredTypes,
+		recognizeKeywordAsFieldName: rt.recognizeKeywordAsFieldName,
+	}
+	for name, declType := range rt.registeredTypes {
+		tpType, found := tp.FindStructType(name)
+		// cast celType to types.type
+
+		expT := declType.CelType()
+		if found && !expT.IsExactType(tpType) {
+			return nil, fmt.Errorf(
+				"type %s definition differs between CEL environment and type provider", name)
+		}
+
+	}
+	return rtWithTypes, nil
+}
+
+// FindStructType attempts to resolve the typeName provided from the rule's rule-schema, or if not
+// from the embedded ref.TypeProvider.
+//
+// FindStructType overrides the default type-finding behavior of the embedded TypeProvider.
+//
+// Note, when the type name is based on the Open API Schema, the name will reflect the object path
+// where the type definition appears.
+func (rt *DeclTypeProvider) FindStructType(typeName string) (*types.Type, bool) {
+	if rt == nil {
+		return nil, false
+	}
+	declType, found := rt.findDeclType(typeName)
+	if found {
+		expT := declType.CelType()
+		return expT, found
+	}
+	return rt.typeProvider.FindStructType(typeName)
+}
+
+// FindDeclType returns the CPT type description which can be mapped to a CEL type.
+func (rt *DeclTypeProvider) FindDeclType(typeName string) (*DeclType, bool) {
+	if rt == nil {
+		return nil, false
+	}
+	return rt.findDeclType(typeName)
+}
+
+// FindStructFieldNames returns the field names associated with the type, if the type
+// is found.
+func (rt *DeclTypeProvider) FindStructFieldNames(typeName string) ([]string, bool) {
+	return []string{}, false
+}
+
+// FindStructFieldType returns a field type given a type name and field name, if found.
+//
+// Note, the type name for an Open API Schema type is likely to be its qualified object path.
+// If, in the future an object instance rather than a type name were provided, the field
+// resolution might more accurately reflect the expected type model. However, in this case
+// concessions were made to align with the existing CEL interfaces.
+func (rt *DeclTypeProvider) FindStructFieldType(typeName, fieldName string) (*types.FieldType, bool) {
+	st, found := rt.findDeclType(typeName)
+	if !found {
+		return rt.typeProvider.FindStructFieldType(typeName, fieldName)
+	}
+
+	f, found := st.Fields[fieldName]
+	if rt.recognizeKeywordAsFieldName && !found && celReservedSymbols.Has(fieldName) {
+		f, found = st.Fields["__"+fieldName+"__"]
+	}
+
+	if found {
+		ft := f.Type
+		expT := ft.CelType()
+		return &types.FieldType{
+			Type: expT,
+		}, true
+	}
+	// This could be a dynamic map.
+	if st.IsMap() {
+		et := st.ElemType
+		expT := et.CelType()
+		return &types.FieldType{
+			Type: expT,
+		}, true
+	}
+	return nil, false
+}
+
+// NativeToValue is an implementation of the ref.TypeAdapter interface which supports conversion
+// of rule values to CEL ref.Val instances.
+func (rt *DeclTypeProvider) NativeToValue(val interface{}) ref.Val {
+	return rt.typeAdapter.NativeToValue(val)
+}
+
+func (rt *DeclTypeProvider) NewValue(typeName string, fields map[string]ref.Val) ref.Val {
+	// TODO: implement for OpenAPI types to enable CEL object instantiation, which is needed
+	// for mutating admission.
+ return rt.typeProvider.NewValue(typeName, fields) +} + +// TypeNames returns the list of type names declared within the DeclTypeProvider object. +func (rt *DeclTypeProvider) TypeNames() []string { + typeNames := make([]string, len(rt.registeredTypes)) + i := 0 + for name := range rt.registeredTypes { + typeNames[i] = name + i++ + } + return typeNames +} + +func (rt *DeclTypeProvider) findDeclType(typeName string) (*DeclType, bool) { + declType, found := rt.registeredTypes[typeName] + if found { + return declType, true + } + declType = findScalar(typeName) + return declType, declType != nil +} + +func findScalar(typename string) *DeclType { + switch typename { + case BoolType.TypeName(): + return BoolType + case BytesType.TypeName(): + return BytesType + case DoubleType.TypeName(): + return DoubleType + case DurationType.TypeName(): + return DurationType + case IntType.TypeName(): + return IntType + case NullType.TypeName(): + return NullType + case StringType.TypeName(): + return StringType + case TimestampType.TypeName(): + return TimestampType + case UintType.TypeName(): + return UintType + case ListType.TypeName(): + return ListType + case MapType.TypeName(): + return MapType + default: + return nil + } +} + +var ( + // AnyType is equivalent to the CEL 'protobuf.Any' type in that the value may have any of the + // types supported. + AnyType = NewSimpleTypeWithMinSize("any", cel.AnyType, nil, 1) + + // BoolType is equivalent to the CEL 'bool' type. + BoolType = NewSimpleTypeWithMinSize("bool", cel.BoolType, types.False, MinBoolSize) + + // BytesType is equivalent to the CEL 'bytes' type. + BytesType = NewSimpleTypeWithMinSize("bytes", cel.BytesType, types.Bytes([]byte{}), MinStringSize) + + // DoubleType is equivalent to the CEL 'double' type which is a 64-bit floating point value. + DoubleType = NewSimpleTypeWithMinSize("double", cel.DoubleType, types.Double(0), MinNumberSize) + + // DurationType is equivalent to the CEL 'duration' type. + DurationType = NewSimpleTypeWithMinSize("duration", cel.DurationType, types.Duration{Duration: time.Duration(0)}, MinDurationSizeJSON) + + // DateType is equivalent to the CEL 'date' type. + DateType = NewSimpleTypeWithMinSize("date", cel.TimestampType, types.Timestamp{Time: time.Time{}}, JSONDateSize) + + // DynType is the equivalent of the CEL 'dyn' concept which indicates that the type will be + // determined at runtime rather than compile time. + DynType = NewSimpleTypeWithMinSize("dyn", cel.DynType, nil, 1) + + // IntType is equivalent to the CEL 'int' type which is a 64-bit signed int. + IntType = NewSimpleTypeWithMinSize("int", cel.IntType, types.IntZero, MinNumberSize) + + // NullType is equivalent to the CEL 'null_type'. + NullType = NewSimpleTypeWithMinSize("null_type", cel.NullType, types.NullValue, 4) + + // StringType is equivalent to the CEL 'string' type which is expected to be a UTF-8 string. + // StringType values may either be string literals or expression strings. + StringType = NewSimpleTypeWithMinSize("string", cel.StringType, types.String(""), MinStringSize) + + // TimestampType corresponds to the well-known protobuf.Timestamp type supported within CEL. + // Note that both the OpenAPI date and date-time types map onto TimestampType, so not all types + // labeled as Timestamp will necessarily have the same MinSerializedSize. 
+ TimestampType = NewSimpleTypeWithMinSize("timestamp", cel.TimestampType, types.Timestamp{Time: time.Time{}}, JSONDateSize) + + // QuantityDeclType wraps a [QuantityType] and makes it usable with functions that expect + // a [DeclType]. + QuantityDeclType = NewSimpleTypeWithMinSize("quantity", QuantityType, Quantity{Quantity: resource.NewQuantity(0, resource.DecimalSI)}, 8) + + // UintType is equivalent to the CEL 'uint' type. + UintType = NewSimpleTypeWithMinSize("uint", cel.UintType, types.Uint(0), 1) + + // ListType is equivalent to the CEL 'list' type. + ListType = NewListType(AnyType, noMaxLength) + + // MapType is equivalent to the CEL 'map' type. + MapType = NewMapType(AnyType, AnyType, noMaxLength) +) diff --git a/vendor/k8s.io/apiserver/pkg/cel/url.go b/vendor/k8s.io/apiserver/pkg/cel/url.go new file mode 100644 index 00000000..6800205c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/url.go @@ -0,0 +1,80 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cel + +import ( + "fmt" + "net/url" + "reflect" + + "github.com/google/cel-go/cel" + "github.com/google/cel-go/checker/decls" + "github.com/google/cel-go/common/types" + "github.com/google/cel-go/common/types/ref" +) + +// URL provides a CEL representation of a URL. +type URL struct { + *url.URL +} + +var ( + URLObject = decls.NewObjectType("kubernetes.URL") + typeValue = types.NewTypeValue("kubernetes.URL") + URLType = cel.ObjectType("kubernetes.URL") +) + +// ConvertToNative implements ref.Val.ConvertToNative. +func (d URL) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { + if reflect.TypeOf(d.URL).AssignableTo(typeDesc) { + return d.URL, nil + } + if reflect.TypeOf("").AssignableTo(typeDesc) { + return d.URL.String(), nil + } + return nil, fmt.Errorf("type conversion error from 'URL' to '%v'", typeDesc) +} + +// ConvertToType implements ref.Val.ConvertToType. +func (d URL) ConvertToType(typeVal ref.Type) ref.Val { + switch typeVal { + case typeValue: + return d + case types.TypeType: + return typeValue + } + return types.NewErr("type conversion error from '%s' to '%s'", typeValue, typeVal) +} + +// Equal implements ref.Val.Equal. +func (d URL) Equal(other ref.Val) ref.Val { + otherDur, ok := other.(URL) + if !ok { + return types.MaybeNoSuchOverloadErr(other) + } + return types.Bool(d.URL.String() == otherDur.URL.String()) +} + +// Type implements ref.Val.Type. +func (d URL) Type() ref.Type { + return typeValue +} + +// Value implements ref.Val.Value. +func (d URL) Value() interface{} { + return d.URL +} diff --git a/vendor/k8s.io/apiserver/pkg/cel/value.go b/vendor/k8s.io/apiserver/pkg/cel/value.go new file mode 100644 index 00000000..01c7f20a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/cel/value.go @@ -0,0 +1,769 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cel
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/google/cel-go/common/types"
+	"github.com/google/cel-go/common/types/ref"
+	"github.com/google/cel-go/common/types/traits"
+)
+
+// EncodeStyle is a hint for string encoding of parsed values.
+type EncodeStyle int
+
+const (
+	// BlockValueStyle is the default string encoding which preserves whitespace and newlines.
+	BlockValueStyle EncodeStyle = iota
+
+	// FlowValueStyle indicates that the string is an inline representation of complex types.
+	FlowValueStyle
+
+	// FoldedValueStyle is a multiline string with whitespace and newlines trimmed to a single
+	// whitespace. Repeated newlines are replaced with a single newline rather than a single
+	// whitespace.
+	FoldedValueStyle
+
+	// LiteralStyle is a multiline string that preserves newlines, but trims all other whitespace
+	// to a single character.
+	LiteralStyle
+)
+
+// NewEmptyDynValue returns the zero-valued DynValue.
+func NewEmptyDynValue() *DynValue {
+	// note: 0 is not a valid parse node identifier.
+	dv, _ := NewDynValue(0, nil)
+	return dv
+}
+
+// NewDynValue returns a DynValue that corresponds to a parse node id and value.
+func NewDynValue(id int64, val interface{}) (*DynValue, error) {
+	dv := &DynValue{ID: id}
+	err := dv.SetValue(val)
+	return dv, err
+}
+
+// DynValue is a dynamically typed value used to describe unstructured content.
+// Whether the value has the desired type is determined by where it is used within the Instance or
+// Template, and whether there are schemas which might enforce a more rigid type definition.
+type DynValue struct {
+	ID          int64
+	EncodeStyle EncodeStyle
+	value       interface{}
+	exprValue   ref.Val
+	declType    *DeclType
+}
+
+// DeclType returns the policy model type of the dyn value.
+func (dv *DynValue) DeclType() *DeclType {
+	return dv.declType
+}
+
+// ConvertToNative is an implementation of the CEL ref.Val method used to adapt between CEL types
+// and Go-native types.
+//
+// The default behavior of this method is to first convert to a CEL type which has a well-defined
+// set of conversion behaviors and proxy to the CEL ConvertToNative method for the type.
+func (dv *DynValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	ev := dv.ExprValue()
+	if types.IsError(ev) {
+		return nil, ev.(*types.Err)
+	}
+	return ev.ConvertToNative(typeDesc)
+}
+
+// Equal returns whether the dyn value is equal to a given CEL value.
+func (dv *DynValue) Equal(other ref.Val) ref.Val {
+	dvType := dv.Type()
+	otherType := other.Type()
+	// Preserve CEL's homogeneous equality constraint.
+	if dvType.TypeName() != otherType.TypeName() {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	switch v := dv.value.(type) {
+	case ref.Val:
+		return v.Equal(other)
+	case PlainTextValue:
+		return celBool(string(v) == other.Value().(string))
+	case *MultilineStringValue:
+		return celBool(v.Value == other.Value().(string))
+	case time.Duration:
+		otherDuration := other.Value().(time.Duration)
+		return celBool(v == otherDuration)
+	case time.Time:
+		otherTimestamp := other.Value().(time.Time)
+		return celBool(v.Equal(otherTimestamp))
+	default:
+		return celBool(reflect.DeepEqual(v, other.Value()))
+	}
+}
+
+// ExprValue converts the DynValue into a CEL value.
+func (dv *DynValue) ExprValue() ref.Val {
+	return dv.exprValue
+}
+
+// Value returns the underlying value held by this reference.
+func (dv *DynValue) Value() interface{} {
+	return dv.value
+}
+
+// SetValue updates the underlying value held by this reference.
+func (dv *DynValue) SetValue(value interface{}) error {
+	dv.value = value
+	var err error
+	dv.exprValue, dv.declType, err = exprValue(value)
+	return err
+}
+
+// Type returns the CEL type for the given value.
+func (dv *DynValue) Type() ref.Type {
+	return dv.ExprValue().Type()
+}
+
+func exprValue(value interface{}) (ref.Val, *DeclType, error) {
+	switch v := value.(type) {
+	case bool:
+		return types.Bool(v), BoolType, nil
+	case []byte:
+		return types.Bytes(v), BytesType, nil
+	case float64:
+		return types.Double(v), DoubleType, nil
+	case int64:
+		return types.Int(v), IntType, nil
+	case string:
+		return types.String(v), StringType, nil
+	case uint64:
+		return types.Uint(v), UintType, nil
+	case time.Duration:
+		return types.Duration{Duration: v}, DurationType, nil
+	case time.Time:
+		return types.Timestamp{Time: v}, TimestampType, nil
+	case types.Null:
+		return v, NullType, nil
+	case *ListValue:
+		return v, ListType, nil
+	case *MapValue:
+		return v, MapType, nil
+	case *ObjectValue:
+		return v, v.objectType, nil
+	default:
+		return nil, unknownType, fmt.Errorf("unsupported type: (%T)%v", v, v)
+	}
+}
+
+// PlainTextValue is a text string literal which must not be treated as an expression.
+type PlainTextValue string
+
+// MultilineStringValue is a multiline string value which has been parsed in a way which omits
+// whitespace as well as a raw form which preserves whitespace.
+type MultilineStringValue struct {
+	Value string
+	Raw   string
+}
+
+func newStructValue() *structValue {
+	return &structValue{
+		Fields:   []*Field{},
+		fieldMap: map[string]*Field{},
+	}
+}
+
+type structValue struct {
+	Fields   []*Field
+	fieldMap map[string]*Field
+}
+
+// AddField appends a Field to the structValue and indexes the field by name.
+func (sv *structValue) AddField(field *Field) {
+	sv.Fields = append(sv.Fields, field)
+	sv.fieldMap[field.Name] = field
+}
+
+// ConvertToNative converts the structValue to native Go types.
+func (sv *structValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
+	if typeDesc.Kind() != reflect.Map &&
+		typeDesc.Kind() != reflect.Struct &&
+		typeDesc.Kind() != reflect.Pointer &&
+		typeDesc.Kind() != reflect.Interface {
+		return nil, fmt.Errorf("type conversion error from object to '%v'", typeDesc)
+	}
+
+	// Unwrap pointers, but track their use.
+	isPtr := false
+	if typeDesc.Kind() == reflect.Pointer {
+		tk := typeDesc
+		typeDesc = typeDesc.Elem()
+		if typeDesc.Kind() == reflect.Pointer {
+			return nil, fmt.Errorf("unsupported type conversion to '%v'", tk)
+		}
+		isPtr = true
+	}
+
+	if typeDesc.Kind() == reflect.Map {
+		keyType := typeDesc.Key()
+		if keyType.Kind() != reflect.String && keyType.Kind() != reflect.Interface {
+			return nil, fmt.Errorf("object fields cannot be converted to type '%v'", keyType)
+		}
+		elemType := typeDesc.Elem()
+		sz := len(sv.fieldMap)
+		ntvMap := reflect.MakeMapWithSize(typeDesc, sz)
+		for name, val := range sv.fieldMap {
+			refVal, err := val.Ref.ConvertToNative(elemType)
+			if err != nil {
+				return nil, err
+			}
+			ntvMap.SetMapIndex(reflect.ValueOf(name), reflect.ValueOf(refVal))
+		}
+		return ntvMap.Interface(), nil
+	}
+
+	if typeDesc.Kind() == reflect.Struct {
+		ntvObjPtr := reflect.New(typeDesc)
+		ntvObj := ntvObjPtr.Elem()
+		for name, val := range sv.fieldMap {
+			f := ntvObj.FieldByName(name)
+			if !f.IsValid() {
+				return nil, fmt.Errorf("type conversion error, no such field %s in type %v",
+					name, typeDesc)
+			}
+			fv, err := val.Ref.ConvertToNative(f.Type())
+			if err != nil {
+				return nil, err
+			}
+			f.Set(reflect.ValueOf(fv))
+		}
+		if isPtr {
+			return ntvObjPtr.Interface(), nil
+		}
+		return ntvObj.Interface(), nil
+	}
+	return nil, fmt.Errorf("type conversion error from object to '%v'", typeDesc)
+}
+
+// GetField returns a Field by name if one exists.
+func (sv *structValue) GetField(name string) (*Field, bool) {
+	field, found := sv.fieldMap[name]
+	return field, found
+}
+
+// IsSet returns whether the given field, which is defined, has also been set.
+func (sv *structValue) IsSet(key ref.Val) ref.Val {
+	k, ok := key.(types.String)
+	if !ok {
+		return types.MaybeNoSuchOverloadErr(key)
+	}
+	name := string(k)
+	_, found := sv.fieldMap[name]
+	return celBool(found)
+}
+
+// NewObjectValue creates a struct value with a schema type and returns the empty ObjectValue.
+func NewObjectValue(sType *DeclType) *ObjectValue {
+	return &ObjectValue{
+		structValue: newStructValue(),
+		objectType:  sType,
+	}
+}
+
+// ObjectValue is a struct with a custom schema type which indicates the fields and types
+// associated with the structure.
+type ObjectValue struct {
+	*structValue
+	objectType *DeclType
+}
+
+// ConvertToType is an implementation of the CEL ref.Val interface method.
+func (o *ObjectValue) ConvertToType(t ref.Type) ref.Val {
+	if t == types.TypeType {
+		return types.NewObjectTypeValue(o.objectType.TypeName())
+	}
+	if t.TypeName() == o.objectType.TypeName() {
+		return o
+	}
+	return types.NewErr("type conversion error from '%s' to '%s'", o.Type(), t)
+}
+
+// Equal returns true if the two object types are equal and their field values are equal.
+func (o *ObjectValue) Equal(other ref.Val) ref.Val {
+	// Preserve CEL's homogeneous equality semantics.
+	if o.objectType.TypeName() != other.Type().TypeName() {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	o2 := other.(traits.Indexer)
+	for name := range o.objectType.Fields {
+		k := types.String(name)
+		v := o.Get(k)
+		ov := o2.Get(k)
+		vEq := v.Equal(ov)
+		if vEq != types.True {
+			return vEq
+		}
+	}
+	return types.True
+}
+
+// Get returns the value of the specified field.
+//
+// If the field is set, its value is returned. If the field is not set, the default value for the
+// field is returned, thus allowing for safe traversal and preserving proto-like field traversal
+// semantics for Open API Schema backed types.
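A consequence of the Get implementation that follows: reading a declared-but-unset field yields the field's default value rather than an error, which is what gives schema-backed objects their proto-like safe traversal. Sketch (type and field invented; types is cel-go's common/types package):

    objType := apiservercel.NewObjectType("v1.Example", map[string]*apiservercel.DeclField{
        "enabled": apiservercel.NewDeclField("enabled", apiservercel.BoolType, false, nil, nil),
    })
    obj := apiservercel.NewObjectValue(objType)

    fmt.Println(obj.IsSet(types.String("enabled"))) // false: declared but never set
    fmt.Println(obj.Get(types.String("enabled")))   // false: BoolType's default, not an error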
+func (o *ObjectValue) Get(name ref.Val) ref.Val { + n, ok := name.(types.String) + if !ok { + return types.MaybeNoSuchOverloadErr(n) + } + nameStr := string(n) + field, found := o.fieldMap[nameStr] + if found { + return field.Ref.ExprValue() + } + fieldDef, found := o.objectType.Fields[nameStr] + if !found { + return types.NewErr("no such field: %s", nameStr) + } + defValue := fieldDef.DefaultValue() + if defValue != nil { + return defValue + } + return types.NewErr("no default for type: %s", fieldDef.TypeName()) +} + +// Type returns the CEL type value of the object. +func (o *ObjectValue) Type() ref.Type { + return o.objectType +} + +// Value returns the Go-native representation of the object. +func (o *ObjectValue) Value() interface{} { + return o +} + +// NewMapValue returns an empty MapValue. +func NewMapValue() *MapValue { + return &MapValue{ + structValue: newStructValue(), + } +} + +// MapValue declares an object with a set of named fields whose values are dynamically typed. +type MapValue struct { + *structValue +} + +// ConvertToObject produces an ObjectValue from the MapValue with the associated schema type. +// +// The conversion is shallow and the memory shared between the Object and Map as all references +// to the map are expected to be replaced with the Object reference. +func (m *MapValue) ConvertToObject(declType *DeclType) *ObjectValue { + return &ObjectValue{ + structValue: m.structValue, + objectType: declType, + } +} + +// Contains returns whether the given key is contained in the MapValue. +func (m *MapValue) Contains(key ref.Val) ref.Val { + v, found := m.Find(key) + if v != nil && types.IsUnknownOrError(v) { + return v + } + return celBool(found) +} + +// ConvertToType converts the MapValue to another CEL type, if possible. +func (m *MapValue) ConvertToType(t ref.Type) ref.Val { + switch t { + case types.MapType: + return m + case types.TypeType: + return types.MapType + } + return types.NewErr("type conversion error from '%s' to '%s'", m.Type(), t) +} + +// Equal returns true if the maps are of the same size, have the same keys, and the key-values +// from each map are equal. +func (m *MapValue) Equal(other ref.Val) ref.Val { + oMap, isMap := other.(traits.Mapper) + if !isMap { + return types.MaybeNoSuchOverloadErr(other) + } + if m.Size() != oMap.Size() { + return types.False + } + for name, field := range m.fieldMap { + k := types.String(name) + ov, found := oMap.Find(k) + if !found { + return types.False + } + v := field.Ref.ExprValue() + vEq := v.Equal(ov) + if vEq != types.True { + return vEq + } + } + return types.True +} + +// Find returns the value for the key in the map, if found. +func (m *MapValue) Find(name ref.Val) (ref.Val, bool) { + // Currently only maps with string keys are supported as this is best aligned with JSON, + // and also much simpler to support. + n, ok := name.(types.String) + if !ok { + return types.MaybeNoSuchOverloadErr(n), true + } + nameStr := string(n) + field, found := m.fieldMap[nameStr] + if found { + return field.Ref.ExprValue(), true + } + return nil, false +} + +// Get returns the value for the key in the map, or error if not found. +func (m *MapValue) Get(key ref.Val) ref.Val { + v, found := m.Find(key) + if found { + return v + } + return types.ValOrErr(key, "no such key: %v", key) +} + +// Iterator produces a traits.Iterator which walks over the map keys. +// +// The Iterator is frequently used within comprehensions. 
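The map-to-object conversion is deliberately shallow: ConvertToObject re-labels the same underlying structValue with a schema type once one is resolved, so no fields are copied. Sketch (IDs and names arbitrary; NewField is defined just below):

    objType := apiservercel.NewObjectType("v1.App", map[string]*apiservercel.DeclField{
        "name": apiservercel.NewDeclField("name", apiservercel.StringType, true, nil, nil),
    })

    m := apiservercel.NewMapValue()
    f := apiservercel.NewField(1, "name")
    _ = f.Ref.SetValue("webapp")
    m.AddField(f)

    v, found := m.Find(types.String("name"))
    fmt.Println(v, found) // webapp true

    // Same fields, now typed: the ObjectValue shares m's struct value.
    obj := m.ConvertToObject(objType)
    fmt.Println(obj.Get(types.String("name"))) // webapp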
+func (m *MapValue) Iterator() traits.Iterator {
+	keys := make([]ref.Val, len(m.fieldMap))
+	i := 0
+	for k := range m.fieldMap {
+		keys[i] = types.String(k)
+		i++
+	}
+	return &baseMapIterator{
+		baseVal: &baseVal{},
+		keys:    keys,
+	}
+}
+
+// Size returns the number of keys in the map.
+func (m *MapValue) Size() ref.Val {
+	return types.Int(len(m.Fields))
+}
+
+// Type returns the CEL ref.Type for the map.
+func (m *MapValue) Type() ref.Type {
+	return types.MapType
+}
+
+// Value returns the Go-native representation of the MapValue.
+func (m *MapValue) Value() interface{} {
+	return m
+}
+
+type baseMapIterator struct {
+	*baseVal
+	keys []ref.Val
+	idx  int
+}
+
+// HasNext implements the traits.Iterator interface method.
+func (it *baseMapIterator) HasNext() ref.Val {
+	if it.idx < len(it.keys) {
+		return types.True
+	}
+	return types.False
+}
+
+// Next implements the traits.Iterator interface method.
+func (it *baseMapIterator) Next() ref.Val {
+	key := it.keys[it.idx]
+	it.idx++
+	return key
+}
+
+// Type implements the CEL ref.Val interface method.
+func (it *baseMapIterator) Type() ref.Type {
+	return types.IteratorType
+}
+
+// NewField returns a Field instance with an empty DynValue that refers to the
+// specified parse node id and field name.
+func NewField(id int64, name string) *Field {
+	return &Field{
+		ID:   id,
+		Name: name,
+		Ref:  NewEmptyDynValue(),
+	}
+}
+
+// Field specifies a field name and a reference to a dynamic value.
+type Field struct {
+	ID   int64
+	Name string
+	Ref  *DynValue
+}
+
+// NewListValue returns an empty ListValue instance.
+func NewListValue() *ListValue {
+	return &ListValue{
+		Entries: []*DynValue{},
+	}
+}
+
+// ListValue contains a list of dynamically typed entries.
+type ListValue struct {
+	Entries      []*DynValue
+	initValueSet sync.Once
+	valueSet     map[ref.Val]struct{}
+}
+
+// Add concatenates two lists together to produce a new CEL list value.
+func (lv *ListValue) Add(other ref.Val) ref.Val {
+	oArr, isArr := other.(traits.Lister)
+	if !isArr {
+		return types.MaybeNoSuchOverloadErr(other)
+	}
+	szRight := len(lv.Entries)
+	szLeft := int(oArr.Size().(types.Int))
+	sz := szRight + szLeft
+	combo := make([]ref.Val, sz)
+	for i := 0; i < szRight; i++ {
+		combo[i] = lv.Entries[i].ExprValue()
+	}
+	for i := 0; i < szLeft; i++ {
+		combo[i+szRight] = oArr.Get(types.Int(i))
+	}
+	return types.DefaultTypeAdapter.NativeToValue(combo)
+}
+
+// Append adds another entry into the ListValue.
+func (lv *ListValue) Append(entry *DynValue) {
+	lv.Entries = append(lv.Entries, entry)
+	// The append resets all previously built indices.
+	lv.initValueSet = sync.Once{}
+}
+
+// Contains returns whether the input `val` is equal to an element in the list.
+//
+// If any pair-wise comparison between the input value and the list element is an error, the
+// operation will return an error.
+func (lv *ListValue) Contains(val ref.Val) ref.Val {
+	if types.IsUnknownOrError(val) {
+		return val
+	}
+	lv.initValueSet.Do(lv.finalizeValueSet)
+	if lv.valueSet != nil {
+		_, found := lv.valueSet[val]
+		if found {
+			return types.True
+		}
+		// Instead of returning false, ensure that CEL's heterogeneous equality constraint
+		// is satisfied by allowing pair-wise equality behavior to determine the outcome.
+ } + var err ref.Val + sz := len(lv.Entries) + for i := 0; i < sz; i++ { + elem := lv.Entries[i] + cmp := elem.Equal(val) + b, ok := cmp.(types.Bool) + if !ok && err == nil { + err = types.MaybeNoSuchOverloadErr(cmp) + } + if b == types.True { + return types.True + } + } + if err != nil { + return err + } + return types.False +} + +// ConvertToNative is an implementation of the CEL ref.Val method used to adapt between CEL types +// and Go-native array-like types. +func (lv *ListValue) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { + // Non-list conversion. + if typeDesc.Kind() != reflect.Slice && + typeDesc.Kind() != reflect.Array && + typeDesc.Kind() != reflect.Interface { + return nil, fmt.Errorf("type conversion error from list to '%v'", typeDesc) + } + + // If the list is already assignable to the desired type return it. + if reflect.TypeOf(lv).AssignableTo(typeDesc) { + return lv, nil + } + + // List conversion. + otherElem := typeDesc.Elem() + + // Allow the element ConvertToNative() function to determine whether conversion is possible. + sz := len(lv.Entries) + nativeList := reflect.MakeSlice(typeDesc, int(sz), int(sz)) + for i := 0; i < sz; i++ { + elem := lv.Entries[i] + nativeElemVal, err := elem.ConvertToNative(otherElem) + if err != nil { + return nil, err + } + nativeList.Index(int(i)).Set(reflect.ValueOf(nativeElemVal)) + } + return nativeList.Interface(), nil +} + +// ConvertToType converts the ListValue to another CEL type. +func (lv *ListValue) ConvertToType(t ref.Type) ref.Val { + switch t { + case types.ListType: + return lv + case types.TypeType: + return types.ListType + } + return types.NewErr("type conversion error from '%s' to '%s'", ListType, t) +} + +// Equal returns true if two lists are of the same size, and the values at each index are also +// equal. +func (lv *ListValue) Equal(other ref.Val) ref.Val { + oArr, isArr := other.(traits.Lister) + if !isArr { + return types.MaybeNoSuchOverloadErr(other) + } + sz := types.Int(len(lv.Entries)) + if sz != oArr.Size() { + return types.False + } + for i := types.Int(0); i < sz; i++ { + cmp := lv.Get(i).Equal(oArr.Get(i)) + if cmp != types.True { + return cmp + } + } + return types.True +} + +// Get returns the value at the given index. +// +// If the index is negative or greater than the size of the list, an error is returned. +func (lv *ListValue) Get(idx ref.Val) ref.Val { + iv, isInt := idx.(types.Int) + if !isInt { + return types.ValOrErr(idx, "unsupported index: %v", idx) + } + i := int(iv) + if i < 0 || i >= len(lv.Entries) { + return types.NewErr("index out of bounds: %v", idx) + } + return lv.Entries[i].ExprValue() +} + +// Iterator produces a traits.Iterator suitable for use in CEL comprehension macros. +func (lv *ListValue) Iterator() traits.Iterator { + return &baseListIterator{ + getter: lv.Get, + sz: len(lv.Entries), + } +} + +// Size returns the number of elements in the list. +func (lv *ListValue) Size() ref.Val { + return types.Int(len(lv.Entries)) +} + +// Type returns the CEL ref.Type for the list. +func (lv *ListValue) Type() ref.Type { + return types.ListType +} + +// Value returns the Go-native value. +func (lv *ListValue) Value() interface{} { + return lv +} + +// finalizeValueSet inspects the ListValue entries in order to make internal optimizations once all list +// entries are known. 
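Contains is therefore an O(1) set lookup for lists of hashable scalars, and only falls back to the pairwise Equal scan when finalizeValueSet (below) gives up on a non-scalar entry. Sketch:

    lv := apiservercel.NewListValue()
    for _, ns := range []string{"default", "kube-system"} {
        dv, _ := apiservercel.NewDynValue(0, ns)
        lv.Append(dv)
    }

    // The first Contains call builds the value set; both lookups use it.
    fmt.Println(lv.Contains(types.String("default")))     // true
    fmt.Println(lv.Contains(types.String("kube-public"))) // false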
+func (lv *ListValue) finalizeValueSet() { + valueSet := make(map[ref.Val]struct{}) + for _, e := range lv.Entries { + switch e.value.(type) { + case bool, float64, int64, string, uint64, types.Null, PlainTextValue: + valueSet[e.ExprValue()] = struct{}{} + default: + lv.valueSet = nil + return + } + } + lv.valueSet = valueSet +} + +type baseVal struct{} + +func (*baseVal) ConvertToNative(typeDesc reflect.Type) (interface{}, error) { + return nil, fmt.Errorf("unsupported native conversion to: %v", typeDesc) +} + +func (*baseVal) ConvertToType(t ref.Type) ref.Val { + return types.NewErr("unsupported type conversion to: %v", t) +} + +func (*baseVal) Equal(other ref.Val) ref.Val { + return types.NewErr("unsupported equality test between instances") +} + +func (v *baseVal) Value() interface{} { + return nil +} + +type baseListIterator struct { + *baseVal + getter func(idx ref.Val) ref.Val + sz int + idx int +} + +func (it *baseListIterator) HasNext() ref.Val { + if it.idx < it.sz { + return types.True + } + return types.False +} + +func (it *baseListIterator) Next() ref.Val { + v := it.getter(types.Int(it.idx)) + it.idx++ + return v +} + +func (it *baseListIterator) Type() ref.Type { + return types.IteratorType +} + +func celBool(pred bool) ref.Val { + if pred { + return types.True + } + return types.False +} + +var unknownType = &DeclType{name: "unknown", MinSerializedSize: 1} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS b/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS new file mode 100644 index 00000000..87c80edb --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - sttts diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go new file mode 100644 index 00000000..8f4e60f5 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/context.go @@ -0,0 +1,78 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/user" +) + +// The key type is unexported to prevent collisions +type key int + +const ( + // namespaceKey is the context key for the request namespace. + namespaceKey key = iota + + // userKey is the context key for the request user. + userKey +) + +// NewContext instantiates a base context object for request flows. +func NewContext() context.Context { + return context.TODO() +} + +// NewDefaultContext instantiates a base context object for request flows in the default namespace +func NewDefaultContext() context.Context { + return WithNamespace(NewContext(), metav1.NamespaceDefault) +} + +// WithValue returns a copy of parent in which the value associated with key is val. 
+func WithValue(parent context.Context, key interface{}, val interface{}) context.Context { + return context.WithValue(parent, key, val) +} + +// WithNamespace returns a copy of parent in which the namespace value is set +func WithNamespace(parent context.Context, namespace string) context.Context { + return WithValue(parent, namespaceKey, namespace) +} + +// NamespaceFrom returns the value of the namespace key on the ctx +func NamespaceFrom(ctx context.Context) (string, bool) { + namespace, ok := ctx.Value(namespaceKey).(string) + return namespace, ok +} + +// NamespaceValue returns the value of the namespace key on the ctx, or the empty string if none +func NamespaceValue(ctx context.Context) string { + namespace, _ := NamespaceFrom(ctx) + return namespace +} + +// WithUser returns a copy of parent in which the user value is set +func WithUser(parent context.Context, user user.Info) context.Context { + return WithValue(parent, userKey, user) +} + +// UserFrom returns the value of the user key on the ctx +func UserFrom(ctx context.Context) (user.Info, bool) { + user, ok := ctx.Value(userKey).(user.Info) + return user, ok +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go new file mode 100644 index 00000000..96da6f2b --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package request contains everything around extracting info from +// a http request object. +// TODO: this package is temporary. Handlers must move into pkg/apiserver/handlers to avoid dependency cycle +package request // import "k8s.io/apiserver/pkg/endpoints/request" diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go new file mode 100644 index 00000000..7d58cf3a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/received_time.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + "time" +) + +type requestReceivedTimestampKeyType int + +// requestReceivedTimestampKey is the ReceivedTimestamp (the time the request reached the apiserver) +// key for the context. 
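These helpers are the standard context-key pattern: an unexported key type makes cross-package collisions impossible, and every stored value gets a typed With/From accessor pair. A runnable sketch of round-tripping a namespace and user:

    package main

    import (
        "fmt"

        "k8s.io/apiserver/pkg/authentication/user"
        "k8s.io/apiserver/pkg/endpoints/request"
    )

    func main() {
        ctx := request.NewDefaultContext() // namespace "default"
        ctx = request.WithUser(ctx, &user.DefaultInfo{Name: "jane"})

        if u, ok := request.UserFrom(ctx); ok {
            fmt.Println(request.NamespaceValue(ctx), u.GetName()) // default jane
        }
    }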
+const requestReceivedTimestampKey requestReceivedTimestampKeyType = iota + +// WithReceivedTimestamp returns a copy of parent context in which the ReceivedTimestamp +// (the time the request reached the apiserver) is set. +// +// If the specified ReceivedTimestamp is zero, no value is set and the parent context is returned as is. +func WithReceivedTimestamp(parent context.Context, receivedTimestamp time.Time) context.Context { + if receivedTimestamp.IsZero() { + return parent + } + return WithValue(parent, requestReceivedTimestampKey, receivedTimestamp) +} + +// ReceivedTimestampFrom returns the value of the ReceivedTimestamp key from the specified context. +func ReceivedTimestampFrom(ctx context.Context) (time.Time, bool) { + info, ok := ctx.Value(requestReceivedTimestampKey).(time.Time) + return info, ok +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go new file mode 100644 index 00000000..808943d1 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go @@ -0,0 +1,305 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" + "fmt" + "net/http" + "strings" + + "k8s.io/apimachinery/pkg/api/validation/path" + metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + genericfeatures "k8s.io/apiserver/pkg/features" + utilfeature "k8s.io/apiserver/pkg/util/feature" + + "k8s.io/klog/v2" +) + +// LongRunningRequestCheck is a predicate which is true for long-running http requests. +type LongRunningRequestCheck func(r *http.Request, requestInfo *RequestInfo) bool + +type RequestInfoResolver interface { + NewRequestInfo(req *http.Request) (*RequestInfo, error) +} + +// RequestInfo holds information parsed from the http.Request +type RequestInfo struct { + // IsResourceRequest indicates whether or not the request is for an API resource or subresource + IsResourceRequest bool + // Path is the URL path of the request + Path string + // Verb is the kube verb associated with the request for API requests, not the http verb. This includes things like list and watch. + // for non-resource requests, this is the lowercase http verb + Verb string + + APIPrefix string + APIGroup string + APIVersion string + Namespace string + // Resource is the name of the resource being requested. This is not the kind. For example: pods + Resource string + // Subresource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. + // For instance, /pods has the resource "pods" and the kind "Pod", while /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" + // (because status operates on pods). 
The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource "binding", and kind "Binding".
+	Subresource string
+	// Name is empty for some verbs, but if the request directly indicates a name (not in body content) then this field is filled in.
+	Name string
+	// Parts are the path parts for the request, always starting with /{resource}/{name}
+	Parts []string
+
+	// FieldSelector contains the unparsed field selector from a request. It is only present if the apiserver
+	// honors field selectors for the verb this request is associated with.
+	FieldSelector string
+	// LabelSelector contains the unparsed label selector from a request. It is only present if the apiserver
+	// honors label selectors for the verb this request is associated with.
+	LabelSelector string
+}
+
+// specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal
+// CRUDdy GET/POST/PUT/DELETE actions on REST objects.
+// TODO: find a way to keep this up to date automatically. Maybe dynamically populate list as handlers added to
+// master's Mux.
+var specialVerbs = sets.NewString("proxy", "watch")
+
+// specialVerbsNoSubresources contains root verbs which do not allow subresources
+var specialVerbsNoSubresources = sets.NewString("proxy")
+
+// namespaceSubresources contains subresources of namespace
+// this list allows the parser to distinguish between a namespace subresource, and a namespaced resource
+var namespaceSubresources = sets.NewString("status", "finalize")
+
+// verbsWithSelectors is the list of verbs which support fieldSelector and labelSelector parameters
+var verbsWithSelectors = sets.NewString("list", "watch", "deletecollection")
+
+// NamespaceSubResourcesForTest exports namespaceSubresources for testing in pkg/controlplane/master_test.go, so we never drift
+var NamespaceSubResourcesForTest = sets.NewString(namespaceSubresources.List()...)
+
+type RequestInfoFactory struct {
+	APIPrefixes          sets.String // without leading and trailing slashes
+	GrouplessAPIPrefixes sets.String // without leading and trailing slashes
+}
+
+// TODO write an integration test against the swagger doc to test the RequestInfo and match up behavior to responses
+// NewRequestInfo returns the information from the http request. If error is not nil, RequestInfo holds the information as best it is known before the failure.
+// It handles both resource and non-resource requests and fills in all the pertinent information for each.
+// Valid Inputs: +// Resource paths +// /apis/{api-group}/{version}/namespaces +// /api/{version}/namespaces +// /api/{version}/namespaces/{namespace} +// /api/{version}/namespaces/{namespace}/{resource} +// /api/{version}/namespaces/{namespace}/{resource}/{resourceName} +// /api/{version}/{resource} +// /api/{version}/{resource}/{resourceName} +// +// Special verbs without subresources: +// /api/{version}/proxy/{resource}/{resourceName} +// /api/{version}/proxy/namespaces/{namespace}/{resource}/{resourceName} +// +// Special verbs with subresources: +// /api/{version}/watch/{resource} +// /api/{version}/watch/namespaces/{namespace}/{resource} +// +// NonResource paths +// /apis/{api-group}/{version} +// /apis/{api-group} +// /apis +// /api/{version} +// /api +// /healthz +// / +func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, error) { + // start with a non-resource request until proven otherwise + requestInfo := RequestInfo{ + IsResourceRequest: false, + Path: req.URL.Path, + Verb: strings.ToLower(req.Method), + } + + currentParts := splitPath(req.URL.Path) + if len(currentParts) < 3 { + // return a non-resource request + return &requestInfo, nil + } + + if !r.APIPrefixes.Has(currentParts[0]) { + // return a non-resource request + return &requestInfo, nil + } + requestInfo.APIPrefix = currentParts[0] + currentParts = currentParts[1:] + + if !r.GrouplessAPIPrefixes.Has(requestInfo.APIPrefix) { + // one part (APIPrefix) has already been consumed, so this is actually "do we have four parts?" + if len(currentParts) < 3 { + // return a non-resource request + return &requestInfo, nil + } + + requestInfo.APIGroup = currentParts[0] + currentParts = currentParts[1:] + } + + requestInfo.IsResourceRequest = true + requestInfo.APIVersion = currentParts[0] + currentParts = currentParts[1:] + + // handle input of form /{specialVerb}/* + verbViaPathPrefix := false + if specialVerbs.Has(currentParts[0]) { + if len(currentParts) < 2 { + return &requestInfo, fmt.Errorf("unable to determine kind and namespace from url, %v", req.URL) + } + + requestInfo.Verb = currentParts[0] + currentParts = currentParts[1:] + verbViaPathPrefix = true + + } else { + switch req.Method { + case "POST": + requestInfo.Verb = "create" + case "GET", "HEAD": + requestInfo.Verb = "get" + case "PUT": + requestInfo.Verb = "update" + case "PATCH": + requestInfo.Verb = "patch" + case "DELETE": + requestInfo.Verb = "delete" + default: + requestInfo.Verb = "" + } + } + + // URL forms: /namespaces/{namespace}/{kind}/*, where parts are adjusted to be relative to kind + if currentParts[0] == "namespaces" { + if len(currentParts) > 1 { + requestInfo.Namespace = currentParts[1] + + // if there is another step after the namespace name and it is not a known namespace subresource + // move currentParts to include it as a resource in its own right + if len(currentParts) > 2 && !namespaceSubresources.Has(currentParts[2]) { + currentParts = currentParts[2:] + } + } + } else { + requestInfo.Namespace = metav1.NamespaceNone + } + + // parsing successful, so we now know the proper value for .Parts + requestInfo.Parts = currentParts + + // parts look like: resource/resourceName/subresource/other/stuff/we/don't/interpret + switch { + case len(requestInfo.Parts) >= 3 && !specialVerbsNoSubresources.Has(requestInfo.Verb): + requestInfo.Subresource = requestInfo.Parts[2] + fallthrough + case len(requestInfo.Parts) >= 2: + requestInfo.Name = requestInfo.Parts[1] + fallthrough + case len(requestInfo.Parts) >= 1: + 
requestInfo.Resource = requestInfo.Parts[0] + } + + // if there's no name on the request and we thought it was a get before, then the actual verb is a list or a watch + if len(requestInfo.Name) == 0 && requestInfo.Verb == "get" { + opts := metainternalversion.ListOptions{} + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { + // An error in parsing request will result in default to "list" and not setting "name" field. + klog.ErrorS(err, "Couldn't parse request", "request", req.URL.Query()) + // Reset opts to not rely on partial results from parsing. + // However, if watch is set, let's report it. + opts = metainternalversion.ListOptions{} + if values := req.URL.Query()["watch"]; len(values) > 0 { + switch strings.ToLower(values[0]) { + case "false", "0": + default: + opts.Watch = true + } + } + } + + if opts.Watch { + requestInfo.Verb = "watch" + } else { + requestInfo.Verb = "list" + } + + if opts.FieldSelector != nil { + if name, ok := opts.FieldSelector.RequiresExactMatch("metadata.name"); ok { + if len(path.IsValidPathSegmentName(name)) == 0 { + requestInfo.Name = name + } + } + } + } + + // if there's no name on the request and we thought it was a delete before, then the actual verb is deletecollection + if len(requestInfo.Name) == 0 && requestInfo.Verb == "delete" { + requestInfo.Verb = "deletecollection" + } + + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) { + // Don't support selector authorization on requests that used the deprecated verb-via-path mechanism, since they don't support selectors consistently. + // There are multi-object and single-object watch endpoints, and only the multi-object one supports selectors. + if !verbViaPathPrefix && verbsWithSelectors.Has(requestInfo.Verb) { + // interestingly these are parsed above, but the current structure there means that if one (or anything) in the + // listOptions fails to decode, the field and label selectors are lost. + // therefore, do the straight query param read here. + if vals := req.URL.Query()["fieldSelector"]; len(vals) > 0 { + requestInfo.FieldSelector = vals[0] + } + if vals := req.URL.Query()["labelSelector"]; len(vals) > 0 { + requestInfo.LabelSelector = vals[0] + } + } + } + + return &requestInfo, nil +} + +type requestInfoKeyType int + +// requestInfoKey is the RequestInfo key for the context. It's of private type here. Because +// keys are interfaces and interfaces are equal when the type and the value is equal, this +// does not conflict with the keys defined in pkg/api. +const requestInfoKey requestInfoKeyType = iota + +// WithRequestInfo returns a copy of parent in which the request info value is set +func WithRequestInfo(parent context.Context, info *RequestInfo) context.Context { + return WithValue(parent, requestInfoKey, info) +} + +// RequestInfoFrom returns the value of the RequestInfo key on the ctx +func RequestInfoFrom(ctx context.Context) (*RequestInfo, bool) { + info, ok := ctx.Value(requestInfoKey).(*RequestInfo) + return info, ok +} + +// splitPath returns the segments for a URL path. 
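End to end, the factory needs only the API prefix sets; verb, group, version, namespace, resource, and name all fall out of the path and HTTP method. A sketch of resolving a namespaced GET (the prefix sets mirror how the apiserver configures its factory):

    package main

    import (
        "fmt"
        "net/http"

        "k8s.io/apimachinery/pkg/util/sets"
        "k8s.io/apiserver/pkg/endpoints/request"
    )

    func main() {
        factory := &request.RequestInfoFactory{
            APIPrefixes:          sets.NewString("api", "apis"),
            GrouplessAPIPrefixes: sets.NewString("api"),
        }

        req, _ := http.NewRequest("GET", "/api/v1/namespaces/default/pods/foo", nil)
        info, _ := factory.NewRequestInfo(req)
        fmt.Println(info.Verb, info.Resource, info.Namespace, info.Name) // get pods default foo
    }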
+func splitPath(path string) []string { + path = strings.Trim(path, "/") + if path == "" { + return []string{} + } + return strings.Split(path, "/") +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go new file mode 100644 index 00000000..d06275b8 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/server_shutdown_signal.go @@ -0,0 +1,55 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package request + +import ( + "context" +) + +// The serverShutdownSignalKeyType type is unexported to prevent collisions +type serverShutdownSignalKeyType int + +// serverShutdownSignalKey is the context key for storing the +// watch termination interface instance for a WATCH request. +const serverShutdownSignalKey serverShutdownSignalKeyType = iota + +// ServerShutdownSignal is associated with the request context so +// the request handler logic has access to signals rlated to +// the server shutdown events +type ServerShutdownSignal interface { + // Signaled when the apiserver is not receiving any new request + ShuttingDown() <-chan struct{} +} + +// ServerShutdownSignalFrom returns the ServerShutdownSignal instance +// associated with the request context. +// If there is no ServerShutdownSignal asscoaied with the context, +// nil is returned. +func ServerShutdownSignalFrom(ctx context.Context) ServerShutdownSignal { + ev, _ := ctx.Value(serverShutdownSignalKey).(ServerShutdownSignal) + return ev +} + +// WithServerShutdownSignal returns a new context that stores +// the ServerShutdownSignal interface instance. +func WithServerShutdownSignal(parent context.Context, window ServerShutdownSignal) context.Context { + if ServerShutdownSignalFrom(parent) != nil { + return parent // Avoid double registering. + } + + return context.WithValue(parent, serverShutdownSignalKey, window) +} diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go b/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go new file mode 100644 index 00000000..435af8e7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/endpoints/request/webhook_duration.go @@ -0,0 +1,311 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package request
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"k8s.io/utils/clock"
+)
+
+func sumDuration(d1 time.Duration, d2 time.Duration) time.Duration {
+	return d1 + d2
+}
+
+func maxDuration(d1 time.Duration, d2 time.Duration) time.Duration {
+	if d1 > d2 {
+		return d1
+	}
+	return d2
+}
+
+// DurationTracker is a simple interface for tracking a function's duration;
+// it is safe for concurrent use by multiple goroutines.
+type DurationTracker interface {
+	// Track measures time spent in the given function f and
+	// aggregates the measured duration using aggregateFunction.
+	// If Track is invoked with f from multiple goroutines concurrently,
+	// then f must be safe to be invoked concurrently by multiple goroutines.
+	Track(f func())
+
+	// TrackDuration tracks latency from the specified duration
+	// and aggregates it using aggregateFunction
+	TrackDuration(time.Duration)
+
+	// GetLatency returns the total latency incurred so far
+	GetLatency() time.Duration
+}
+
+// durationTracker implements DurationTracker by measuring function time
+// using the given clock and aggregates the duration using the given aggregate function
+type durationTracker struct {
+	clock             clock.Clock
+	latency           time.Duration
+	mu                sync.Mutex
+	aggregateFunction func(time.Duration, time.Duration) time.Duration
+}
+
+// Track measures time spent in the given function and aggregates the measured
+// duration using aggregateFunction
+func (t *durationTracker) Track(f func()) {
+	startedAt := t.clock.Now()
+	defer func() {
+		duration := t.clock.Since(startedAt)
+		t.mu.Lock()
+		defer t.mu.Unlock()
+		t.latency = t.aggregateFunction(t.latency, duration)
+	}()
+
+	f()
+}
+
+// TrackDuration tracks latency from the given duration
+// using aggregateFunction
+func (t *durationTracker) TrackDuration(d time.Duration) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.latency = t.aggregateFunction(t.latency, d)
+}
+
+// GetLatency returns the aggregated latency tracked by a tracker
+func (t *durationTracker) GetLatency() time.Duration {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.latency
+}
+
+func newSumLatencyTracker(c clock.Clock) DurationTracker {
+	return &durationTracker{
+		clock:             c,
+		aggregateFunction: sumDuration,
+	}
+}
+
+func newMaxLatencyTracker(c clock.Clock) DurationTracker {
+	return &durationTracker{
+		clock:             c,
+		aggregateFunction: maxDuration,
+	}
+}
+
+// LatencyTrackers stores trackers used to measure latency incurred in
+// components within the apiserver.
+type LatencyTrackers struct {
+	// MutatingWebhookTracker tracks the latency incurred in mutating webhook(s).
+	// Since mutating webhooks are done sequentially, latency
+	// is aggregated using the sum function.
+	MutatingWebhookTracker DurationTracker
+
+	// ValidatingWebhookTracker tracks the latency incurred in validating webhook(s).
+	// Validating webhooks are done in parallel, so the max function is used.
+	ValidatingWebhookTracker DurationTracker
+
+	// APFQueueWaitTracker tracks the latency incurred by queue wait times
+	// from priority & fairness.
+	APFQueueWaitTracker DurationTracker
+
+	// StorageTracker tracks the latency incurred inside the storage layer;
+	// it accounts for the time it takes to send data to the underlying
+	// storage layer (etcd) and get the complete response back.
+	// If a request involves N (N>=1) round trips to the underlying
+	// storage layer, the latency will account for the total duration
+	// from these N round trips.
+	// It does not include the time incurred in admission, or validation.
+	StorageTracker DurationTracker
+
+	// TransformTracker tracks the latency incurred in transforming the
+	// response object(s) returned from the underlying storage layer.
+	// This includes transforming the object to the user's desired form
+	// (i.e. as a Table), and also setting appropriate API level fields.
+	// This does not include the latency incurred in serialization
+	// (json or protobuf) of the response object or writing
+	// of it to the http ResponseWriter object.
+	TransformTracker DurationTracker
+
+	// SerializationTracker tracks the latency incurred in serialization
+	// (json or protobuf) of the response object.
+	// NOTE: serialization and writing of the serialized raw bytes to the
+	// associated http ResponseWriter object are interleaved, and hence
+	// the latency measured here will include the time spent writing the
+	// serialized raw bytes to the http ResponseWriter object.
+	SerializationTracker DurationTracker
+
+	// ResponseWriteTracker tracks the latency incurred in writing the
+	// serialized raw bytes to the http ResponseWriter object (via the
+	// Write method) associated with the request.
+	// The Write method can be invoked multiple times, so we use a
+	// latency tracker that sums up the duration from each call.
+	ResponseWriteTracker DurationTracker
+
+	// DecodeTracker is used to track latency incurred inside the function
+	// that takes an object returned from the underlying storage layer
+	// (etcd) and performs decoding of the response object.
+	// When called multiple times, the latency incurred inside the
+	// decode func each time will be summed up.
+	DecodeTracker DurationTracker
+}
+
+type latencyTrackersKeyType int
+
+// latencyTrackersKey is the key that associates a LatencyTrackers
+// instance with the request context.
+const latencyTrackersKey latencyTrackersKeyType = iota
+
+// WithLatencyTrackers returns a copy of parent context to which an
+// instance of LatencyTrackers is added.
+func WithLatencyTrackers(parent context.Context) context.Context {
+	return WithLatencyTrackersAndCustomClock(parent, clock.RealClock{})
+}
+
+// WithLatencyTrackersAndCustomClock returns a copy of parent context to which
+// an instance of LatencyTrackers is added. Trackers use the given clock.
+func WithLatencyTrackersAndCustomClock(parent context.Context, c clock.Clock) context.Context {
+	return WithValue(parent, latencyTrackersKey, &LatencyTrackers{
+		MutatingWebhookTracker:   newSumLatencyTracker(c),
+		ValidatingWebhookTracker: newMaxLatencyTracker(c),
+		APFQueueWaitTracker:      newMaxLatencyTracker(c),
+		StorageTracker:           newSumLatencyTracker(c),
+		TransformTracker:         newSumLatencyTracker(c),
+		SerializationTracker:     newSumLatencyTracker(c),
+		ResponseWriteTracker:     newSumLatencyTracker(c),
+		DecodeTracker:            newSumLatencyTracker(c),
+	})
+}
+
+// LatencyTrackersFrom returns the associated LatencyTrackers instance
+// from the specified context.
+func LatencyTrackersFrom(ctx context.Context) (*LatencyTrackers, bool) {
+	wd, ok := ctx.Value(latencyTrackersKey).(*LatencyTrackers)
+	return wd, ok && wd != nil
+}
+
+// TrackTransformResponseObjectLatency is used to track latency incurred
+// inside the function that takes an object returned from the underlying
+// storage layer (etcd) and performs any necessary transformations
+// of the response object. This does not include the latency incurred in
+// serialization (json or protobuf) of the response object or writing of
+// it to the http ResponseWriter object.
+// When called multiple times, the latency incurred inside the
+// transform func each time will be summed up.
+func TrackTransformResponseObjectLatency(ctx context.Context, transform func()) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.TransformTracker.Track(transform)
+		return
+	}
+
+	transform()
+}
+
+// TrackStorageLatency is used to track latency incurred
+// inside the underlying storage layer.
+// When called multiple times, the latency provided will be summed up.
+func TrackStorageLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.StorageTracker.TrackDuration(d)
+	}
+}
+
+// TrackSerializeResponseObjectLatency is used to track latency incurred in
+// serialization (json or protobuf) of the response object.
+// When called multiple times, the latency provided will be summed up.
+func TrackSerializeResponseObjectLatency(ctx context.Context, f func()) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.SerializationTracker.Track(f)
+		return
+	}
+
+	f()
+}
+
+// TrackResponseWriteLatency is used to track latency incurred in writing
+// the serialized raw bytes to the http ResponseWriter object (via the
+// Write method) associated with the request.
+// When called multiple times, the latency provided will be summed up.
+func TrackResponseWriteLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.ResponseWriteTracker.TrackDuration(d)
+	}
+}
+
+// TrackAPFQueueWaitLatency is used to track latency incurred
+// by priority and fairness queues.
+func TrackAPFQueueWaitLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.APFQueueWaitTracker.TrackDuration(d)
+	}
+}
+
+// TrackDecodeLatency is used to track latency incurred inside the function
+// that takes an object returned from the underlying storage layer
+// (etcd) and performs decoding of the response object.
+// When called multiple times, the latency incurred inside the
+// decode func each time will be summed up.
+func TrackDecodeLatency(ctx context.Context, d time.Duration) {
+	if tracker, ok := LatencyTrackersFrom(ctx); ok {
+		tracker.DecodeTracker.TrackDuration(d)
+	}
+}
+
+// AuditAnnotationsFromLatencyTrackers will inspect each latency tracker
+// associated with the request context and return a set of audit
+// annotations that can be added to the API audit entry.
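To make the tracker flow concrete: a request path first installs the trackers, reports durations into them, and finally harvests the audit annotations via AuditAnnotationsFromLatencyTrackers (the function itself follows). A minimal sketch, with hypothetical readFromEtcd and encodeResponse helpers standing in for real work:

package sketch

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/endpoints/request"
)

func readFromEtcd()   {} // hypothetical storage round trip
func encodeResponse() {} // hypothetical serializer

func handleSketch() {
	// Install a fresh set of trackers on the context (real-clock variant).
	ctx := request.WithLatencyTrackers(context.Background())

	// Duration-style reporting: the caller measures and hands in a duration.
	start := time.Now()
	readFromEtcd()
	request.TrackStorageLatency(ctx, time.Since(start))

	// Closure-style reporting: the tracker measures the wrapped function.
	request.TrackSerializeResponseObjectLatency(ctx, encodeResponse)

	// Non-zero aggregates surface as apiserver.latency.k8s.io/... annotations.
	for k, v := range request.AuditAnnotationsFromLatencyTrackers(ctx) {
		fmt.Printf("%s=%s\n", k, v)
	}
}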
+func AuditAnnotationsFromLatencyTrackers(ctx context.Context) map[string]string {
+	const (
+		transformLatencyKey         = "apiserver.latency.k8s.io/transform-response-object"
+		storageLatencyKey           = "apiserver.latency.k8s.io/etcd"
+		serializationLatencyKey     = "apiserver.latency.k8s.io/serialize-response-object"
+		responseWriteLatencyKey     = "apiserver.latency.k8s.io/response-write"
+		mutatingWebhookLatencyKey   = "apiserver.latency.k8s.io/mutating-webhook"
+		validatingWebhookLatencyKey = "apiserver.latency.k8s.io/validating-webhook"
+		decodeLatencyKey            = "apiserver.latency.k8s.io/decode-response-object"
+		apfQueueWaitLatencyKey      = "apiserver.latency.k8s.io/apf-queue-wait"
+	)
+
+	tracker, ok := LatencyTrackersFrom(ctx)
+	if !ok {
+		return nil
+	}
+
+	annotations := map[string]string{}
+	if latency := tracker.TransformTracker.GetLatency(); latency != 0 {
+		annotations[transformLatencyKey] = latency.String()
+	}
+	if latency := tracker.StorageTracker.GetLatency(); latency != 0 {
+		annotations[storageLatencyKey] = latency.String()
+	}
+	if latency := tracker.SerializationTracker.GetLatency(); latency != 0 {
+		annotations[serializationLatencyKey] = latency.String()
+	}
+	if latency := tracker.ResponseWriteTracker.GetLatency(); latency != 0 {
+		annotations[responseWriteLatencyKey] = latency.String()
+	}
+	if latency := tracker.MutatingWebhookTracker.GetLatency(); latency != 0 {
+		annotations[mutatingWebhookLatencyKey] = latency.String()
+	}
+	if latency := tracker.ValidatingWebhookTracker.GetLatency(); latency != 0 {
+		annotations[validatingWebhookLatencyKey] = latency.String()
+	}
+	if latency := tracker.DecodeTracker.GetLatency(); latency != 0 {
+		annotations[decodeLatencyKey] = latency.String()
+	}
+	if latency := tracker.APFQueueWaitTracker.GetLatency(); latency != 0 {
+		annotations[apfQueueWaitLatencyKey] = latency.String()
+	}
+	return annotations
+}
diff --git a/vendor/k8s.io/apiserver/pkg/features/OWNERS b/vendor/k8s.io/apiserver/pkg/features/OWNERS
new file mode 100644
index 00000000..3e1dd9f0
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/features/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - feature-approvers
diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go
new file mode 100644
index 00000000..27d9761b
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go
@@ -0,0 +1,428 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package features

+import (
+	"k8s.io/apimachinery/pkg/util/runtime"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/component-base/featuregate"
+)
+
+const (
+	// Every feature gate should add a constant here following this template:
+	//
+	// // owner: @username
+	// // alpha: v1.4
+	// MyFeature featuregate.Feature = "MyFeature"
+	//
+	// Feature gates should be listed in alphabetical, case-sensitive
+	// (upper before any lower case character) order. This reduces the risk
	// of code conflicts because changes are more likely to be scattered
+	// across the file.
+
+	// owner: @ivelichkovich, @tallclair
+	// alpha: v1.27
+	// beta: v1.28
+	// stable: v1.30
+	// kep: https://kep.k8s.io/3716
+	//
+	// Enables usage of MatchConditions fields to use CEL expressions for matching on admission webhooks
+	AdmissionWebhookMatchConditions featuregate.Feature = "AdmissionWebhookMatchConditions"
+
+	// owner: @jefftree @alexzielenski
+	// alpha: v1.26
+	// beta: v1.27
+	// stable: v1.30
+	//
+	// Enables a single HTTP endpoint /discovery/ which supports native HTTP
+	// caching with ETags containing all APIResources known to the apiserver.
+	AggregatedDiscoveryEndpoint featuregate.Feature = "AggregatedDiscoveryEndpoint"
+
+	// owner: @vinayakankugoyal
+	// kep: https://kep.k8s.io/4633
+	// alpha: v1.31
+	//
+	// Allows us to enable anonymous auth for only certain apiserver endpoints.
+	AnonymousAuthConfigurableEndpoints featuregate.Feature = "AnonymousAuthConfigurableEndpoints"
+
+	// owner: @smarterclayton
+	// alpha: v1.8
+	// beta: v1.9
+	// stable: v1.29
+	//
+	// Allow API clients to retrieve resource lists in chunks rather than
+	// all at once.
+	APIListChunking featuregate.Feature = "APIListChunking"
+
+	// owner: @ilackams
+	// alpha: v1.7
+	// beta: v1.16
+	//
+	// Enables compression of REST responses (GET and LIST only)
+	APIResponseCompression featuregate.Feature = "APIResponseCompression"
+
+	// owner: @roycaihw
+	// alpha: v1.20
+	//
+	// Assigns each kube-apiserver an ID in a cluster.
+	APIServerIdentity featuregate.Feature = "APIServerIdentity"
+
+	// owner: @dashpole
+	// alpha: v1.22
+	// beta: v1.27
+	//
+	// Add support for distributed tracing in the API Server
+	APIServerTracing featuregate.Feature = "APIServerTracing"
+
+	// owner: @linxiulei
+	// beta: v1.30
+	//
+	// Enables serving watch requests in separate goroutines.
+	APIServingWithRoutine featuregate.Feature = "APIServingWithRoutine"
+
+	// owner: @deads2k
+	// kep: https://kep.k8s.io/4601
+	// alpha: v1.31
+	//
+	// Allows authorization to use field and label selectors.
+	AuthorizeWithSelectors featuregate.Feature = "AuthorizeWithSelectors"
+
+	// owner: @serathius
+	// beta: v1.31
+	// Enables concurrent watch object decoding to avoid starving watch cache when conversion webhook is installed.
+	ConcurrentWatchObjectDecode featuregate.Feature = "ConcurrentWatchObjectDecode"
+
+	// owner: @cici37 @jpbetz
+	// kep: http://kep.k8s.io/3488
+	// alpha: v1.26
+	// beta: v1.28
+	// stable: v1.30
+	//
+	// Note: the feature gate can be removed in 1.32
+	// Enables expression validation in Admission Control
+	ValidatingAdmissionPolicy featuregate.Feature = "ValidatingAdmissionPolicy"
+
+	// owner: @jefftree
+	// kep: https://kep.k8s.io/4355
+	// alpha: v1.31
+	//
+	// Enables coordinated leader election in the API server
+	CoordinatedLeaderElection featuregate.Feature = "CoordinatedLeaderElection"
+
+	// alpha: v1.20
+	// beta: v1.21
+	// GA: v1.24
+	//
+	// Allows for updating watchcache resource version with progress notify events.
+	EfficientWatchResumption featuregate.Feature = "EfficientWatchResumption"
+
+	// owner: @aramase
+	// kep: https://kep.k8s.io/3299
+	// deprecated: v1.28
+	//
+	// Enables KMS v1 API for encryption at rest.
+	KMSv1 featuregate.Feature = "KMSv1"
+
+	// owner: @aramase
+	// kep: https://kep.k8s.io/3299
+	// alpha: v1.25
+	// beta: v1.27
+	// stable: v1.29
+	//
+	// Enables KMS v2 API for encryption at rest.
+	KMSv2 featuregate.Feature = "KMSv2"
+
+	// owner: @enj
+	// kep: https://kep.k8s.io/3299
+	// beta: v1.28
+	// stable: v1.29
+	//
+	// Enables the use of derived encryption keys with KMS v2.
+	KMSv2KDF featuregate.Feature = "KMSv2KDF"
+
+	// owner: @alexzielenski, @cici37, @jiahuif
+	// kep: https://kep.k8s.io/3962
+	// alpha: v1.30
+	//
+	// Enables the MutatingAdmissionPolicy in the Admission Chain
+	MutatingAdmissionPolicy featuregate.Feature = "MutatingAdmissionPolicy"
+
+	// owner: @jiahuif
+	// kep: https://kep.k8s.io/2887
+	// alpha: v1.23
+	// beta: v1.24
+	//
+	// Enables populating the "enum" field of OpenAPI schemas
+	// in the spec returned from kube-apiserver.
+	OpenAPIEnums featuregate.Feature = "OpenAPIEnums"
+
+	// owner: @caesarxuchao
+	// alpha: v1.15
+	// beta: v1.16
+	// stable: v1.29
+	//
+	// Allow apiservers to show a count of remaining items in the response
+	// to a chunking list request.
+	RemainingItemCount featuregate.Feature = "RemainingItemCount"
+
+	// owner: @wojtek-t
+	// beta: v1.31
+	//
+	// Enables resilient watchcache initialization to avoid controlplane
+	// overload.
+	ResilientWatchCacheInitialization featuregate.Feature = "ResilientWatchCacheInitialization"
+
+	// owner: @serathius
+	// beta: v1.30
+	//
+	// Allow watch cache to create a watch on a dedicated RPC.
+	// This prevents watch cache from being starved by other watches.
+	SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC"
+
+	// owner: @apelisse, @lavalamp
+	// alpha: v1.14
+	// beta: v1.16
+	// stable: v1.22
+	//
+	// Server-side apply. Merging happens on the server.
+	ServerSideApply featuregate.Feature = "ServerSideApply"
+
+	// owner: @kevindelgado
+	// kep: https://kep.k8s.io/2885
+	// alpha: v1.23
+	// beta: v1.24
+	//
+	// Enables server-side field validation.
+	ServerSideFieldValidation featuregate.Feature = "ServerSideFieldValidation"
+
+	// owner: @enj
+	// beta: v1.29
+	//
+	// Enables http2 DOS mitigations for unauthenticated clients.
+	//
+	// Some known reasons to disable these mitigations:
+	//
+	// An API server that is fronted by an L7 load balancer that is set up
+	// to mitigate http2 attacks may opt to disable this protection to prevent
+	// unauthenticated clients from disabling connection reuse between the load
+	// balancer and the API server (many incoming connections could share the
+	// same backend connection).
+	//
+	// An API server that is on a private network may opt to disable this
+	// protection to prevent performance regressions for unauthenticated
+	// clients.
+	UnauthenticatedHTTP2DOSMitigation featuregate.Feature = "UnauthenticatedHTTP2DOSMitigation"
+
+	// owner: @jpbetz
+	// alpha: v1.30
+	// Resource create requests using generateName are retried automatically by the apiserver
+	// if the generated name conflicts with an existing resource name, up to a maximum of 7 retries.
+	RetryGenerateName featuregate.Feature = "RetryGenerateName"
+
+	// owner: @cici37
+	// alpha: v1.30
+	//
+	// StrictCostEnforcementForVAP is used to apply strict CEL cost validation for ValidatingAdmissionPolicy.
+	// It will be set to off by default for a certain period of time to prevent impact on existing users.
+	// It is strongly recommended to enable this feature gate as early as possible.
+	// The strict cost is specific to the extended libraries whose cost is defined under k8s/apiserver/pkg/cel/library.
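Call sites consult these gates through the shared mutable gate, exactly as requestinfo.go did earlier with AuthorizeWithSelectors. A minimal sketch of that pattern (the StrictCostEnforcementForVAP constant documented above continues right below):

package sketch

import (
	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

// selectorsEnabled reports whether field/label selector authorization is on,
// i.e. the default from the table at the bottom of this file unless it was
// overridden, e.g. via --feature-gates on the command line.
func selectorsEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors)
}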
+	StrictCostEnforcementForVAP featuregate.Feature = "StrictCostEnforcementForVAP"
+
+	// owner: @cici37
+	// alpha: v1.30
+	//
+	// StrictCostEnforcementForWebhooks is used to apply strict CEL cost validation for matchConditions in Webhooks.
+	// It will be set to off by default for a certain period of time to prevent impact on existing users.
+	// It is strongly recommended to enable this feature gate as early as possible.
+	// The strict cost is specific to the extended libraries whose cost is defined under k8s/apiserver/pkg/cel/library.
+	StrictCostEnforcementForWebhooks featuregate.Feature = "StrictCostEnforcementForWebhooks"
+
+	// owner: @caesarxuchao @roycaihw
+	// alpha: v1.20
+	//
+	// Enable the storage version API.
+	StorageVersionAPI featuregate.Feature = "StorageVersionAPI"
+
+	// owner: @caesarxuchao
+	// alpha: v1.14
+	// beta: v1.15
+	//
+	// Allow apiservers to expose the storage version hash in the discovery
+	// document.
+	StorageVersionHash featuregate.Feature = "StorageVersionHash"
+
+	// owner: @aramase, @enj, @nabokihms
+	// kep: https://kep.k8s.io/3331
+	// alpha: v1.29
+	// beta: v1.30
+	//
+	// Enables Structured Authentication Configuration
+	StructuredAuthenticationConfiguration featuregate.Feature = "StructuredAuthenticationConfiguration"
+
+	// owner: @palnabarun
+	// kep: https://kep.k8s.io/3221
+	// alpha: v1.29
+	// beta: v1.30
+	//
+	// Enables Structured Authorization Configuration
+	StructuredAuthorizationConfiguration featuregate.Feature = "StructuredAuthorizationConfiguration"
+
+	// owner: @wojtek-t
+	// alpha: v1.15
+	// beta: v1.16
+	// GA: v1.17
+	//
+	// Enables support for watch bookmark events.
+	WatchBookmark featuregate.Feature = "WatchBookmark"
+
+	// owner: @wojtek-t
+	// beta: v1.31
+	//
+	// Enables post-start-hook for storage readiness
+	WatchCacheInitializationPostStartHook featuregate.Feature = "WatchCacheInitializationPostStartHook"
+
+	// owner: @serathius
+	// beta: v1.30
+	// Enables watches without resourceVersion to be served from storage.
+	// Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue.
+	WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion"
+
+	// owner: @vinaykul
+	// kep: http://kep.k8s.io/1287
+	// alpha: v1.27
+	//
+	// Enables In-Place Pod Vertical Scaling
+	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
+
+	// owner: @p0lyn0mial
+	// alpha: v1.27
+	//
+	// Allow the API server to stream individual items instead of chunking
+	WatchList featuregate.Feature = "WatchList"
+
+	// owner: @serathius
+	// kep: http://kep.k8s.io/2340
+	// alpha: v1.28
+	// beta: v1.31
+	//
+	// Allow the API server to serve consistent lists from cache
+	ConsistentListFromCache featuregate.Feature = "ConsistentListFromCache"
+
+	// owner: @tkashem
+	// beta: v1.29
+	// GA: v1.30
+	//
+	// Allow Priority & Fairness in the API server to use a zero value for
+	// the 'nominalConcurrencyShares' field of the 'limited' section of a
+	// priority level.
+	ZeroLimitedNominalConcurrencyShares featuregate.Feature = "ZeroLimitedNominalConcurrencyShares"
+)
+
+func init() {
+	runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates))
+	runtime.Must(utilfeature.DefaultMutableFeatureGate.AddVersioned(defaultVersionedKubernetesFeatureGates))
+}
+
+// defaultVersionedKubernetesFeatureGates consists of all known Kubernetes-specific feature keys with VersionedSpecs.
+// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{ + // Example: + // EmulationVersion: { + // {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, + // }, +} + +// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. +// To add a new feature, define a key for it above and add it here. The features will be +// available throughout Kubernetes binaries. +var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ + + AnonymousAuthConfigurableEndpoints: {Default: false, PreRelease: featuregate.Alpha}, + + AggregatedDiscoveryEndpoint: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + + AdmissionWebhookMatchConditions: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.33 + + APIListChunking: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 + + APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, + + APIServerIdentity: {Default: true, PreRelease: featuregate.Beta}, + + APIServerTracing: {Default: true, PreRelease: featuregate.Beta}, + + APIServingWithRoutine: {Default: false, PreRelease: featuregate.Alpha}, + + AuthorizeWithSelectors: {Default: false, PreRelease: featuregate.Alpha}, + + ConcurrentWatchObjectDecode: {Default: false, PreRelease: featuregate.Beta}, + + ValidatingAdmissionPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 + + CoordinatedLeaderElection: {Default: false, PreRelease: featuregate.Alpha}, + + EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + + KMSv1: {Default: false, PreRelease: featuregate.Deprecated}, + + KMSv2: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + + KMSv2KDF: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31 + + OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, + + RemainingItemCount: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 + + ResilientWatchCacheInitialization: {Default: true, PreRelease: featuregate.Beta}, + + RetryGenerateName: {Default: true, PreRelease: featuregate.Beta}, + + SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + + ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + + ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + + StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, + + StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, + + StrictCostEnforcementForVAP: {Default: false, PreRelease: featuregate.Beta}, + + StrictCostEnforcementForWebhooks: {Default: false, PreRelease: featuregate.Beta}, + + StructuredAuthenticationConfiguration: {Default: true, PreRelease: featuregate.Beta}, + + StructuredAuthorizationConfiguration: {Default: true, PreRelease: featuregate.Beta}, + + UnauthenticatedHTTP2DOSMitigation: {Default: true, PreRelease: featuregate.Beta}, + + WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + + WatchCacheInitializationPostStartHook: {Default: false, PreRelease: featuregate.Beta}, + + WatchFromStorageWithoutResourceVersion: {Default: false, 
PreRelease: featuregate.Beta}, + + InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, + + WatchList: {Default: false, PreRelease: featuregate.Alpha}, + + ConsistentListFromCache: {Default: true, PreRelease: featuregate.Beta}, + + ZeroLimitedNominalConcurrencyShares: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.32 +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go new file mode 100644 index 00000000..6a7b93ad --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/cert_key.go @@ -0,0 +1,59 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" +) + +// certKeyContent holds the content for the cert and key +type certKeyContent struct { + cert []byte + key []byte +} + +func (c *certKeyContent) Equal(rhs *certKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + return bytes.Equal(c.key, rhs.key) && bytes.Equal(c.cert, rhs.cert) +} + +// sniCertKeyContent holds the content for the cert and key as well as any explicit names +type sniCertKeyContent struct { + certKeyContent + sniNames []string +} + +func (c *sniCertKeyContent) Equal(rhs *sniCertKeyContent) bool { + if c == nil || rhs == nil { + return c == rhs + } + + if len(c.sniNames) != len(rhs.sniNames) { + return false + } + + for i := range c.sniNames { + if c.sniNames[i] != rhs.sniNames[i] { + return false + } + } + + return c.certKeyContent.Equal(&rhs.certKeyContent) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go new file mode 100644 index 00000000..2c950f23 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/client_ca.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "bytes" +) + +// dynamicCertificateContent holds the content that overrides the baseTLSConfig +type dynamicCertificateContent struct { + // clientCA holds the content for the clientCA bundle + clientCA caBundleContent + servingCert certKeyContent + sniCerts []sniCertKeyContent +} + +// caBundleContent holds the content for the clientCA bundle. Wrapping the bytes makes the Equals work nicely with the +// method receiver. 
+type caBundleContent struct {
+	caBundle []byte
+}
+
+func (c *dynamicCertificateContent) Equal(rhs *dynamicCertificateContent) bool {
+	if c == nil || rhs == nil {
+		return c == rhs
+	}
+
+	if !c.clientCA.Equal(&rhs.clientCA) {
+		return false
+	}
+
+	if !c.servingCert.Equal(&rhs.servingCert) {
+		return false
+	}
+
+	if len(c.sniCerts) != len(rhs.sniCerts) {
+		return false
+	}
+
+	for i := range c.sniCerts {
+		if !c.sniCerts[i].Equal(&rhs.sniCerts[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (c *caBundleContent) Equal(rhs *caBundleContent) bool {
+	if c == nil || rhs == nil {
+		return c == rhs
+	}
+
+	return bytes.Equal(c.caBundle, rhs.caBundle)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go
new file mode 100644
index 00000000..845a45fa
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/configmap_cafile_content.go
@@ -0,0 +1,277 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamiccertificates
+
+import (
+	"bytes"
+	"context"
+	"crypto/x509"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	corev1informers "k8s.io/client-go/informers/core/v1"
+	"k8s.io/client-go/kubernetes"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+// ConfigMapCAController provides a CAContentProvider that can dynamically react to configmap changes.
+// It also fulfills the authenticator interface to provide VerifyOptions.
+type ConfigMapCAController struct {
+	name string
+
+	configmapLister    corev1listers.ConfigMapLister
+	configmapNamespace string
+	configmapName      string
+	configmapKey       string
+	// configMapInformer is tracked so that we can start these on Run
+	configMapInformer cache.SharedIndexInformer
+
+	// caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the configmap key
+	caBundle atomic.Value
+
+	listeners []Listener
+
+	queue workqueue.TypedRateLimitingInterface[string]
+	// preRunCaches are the caches to sync before starting the work of this control loop
+	preRunCaches []cache.InformerSynced
+}
+
+var _ CAContentProvider = &ConfigMapCAController{}
+var _ ControllerRunner = &ConfigMapCAController{}
+
+// NewDynamicCAFromConfigMapController returns a CAContentProvider based on a configmap that automatically reloads content.
+// It is near-realtime via an informer.
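A hedged sketch of how a consumer might wire this controller up (the constructor itself follows); the namespace, configmap name, and data key are illustrative values, not something this package mandates:

package sketch

import (
	"context"

	"k8s.io/apiserver/pkg/server/dynamiccertificates"
	"k8s.io/client-go/kubernetes"
)

func startClientCAWatch(ctx context.Context, client kubernetes.Interface) (*dynamiccertificates.ConfigMapCAController, error) {
	ctrl, err := dynamiccertificates.NewDynamicCAFromConfigMapController(
		"client-ca",       // purpose, used only for naming
		"kube-system",     // illustrative namespace
		"my-ca-configmap", // illustrative configmap name
		"ca.crt",          // illustrative data key
		client)
	if err != nil {
		return nil, err
	}
	_ = ctrl.RunOnce(ctx) // prime synchronously; load errors are tolerated by design
	go ctrl.Run(ctx, 1)   // keep the bundle fresh via the informer
	return ctrl, nil
}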
+func NewDynamicCAFromConfigMapController(purpose, namespace, name, key string, kubeClient kubernetes.Interface) (*ConfigMapCAController, error) { + if len(purpose) == 0 { + return nil, fmt.Errorf("missing purpose for ca bundle") + } + if len(namespace) == 0 { + return nil, fmt.Errorf("missing namespace for ca bundle") + } + if len(name) == 0 { + return nil, fmt.Errorf("missing name for ca bundle") + } + if len(key) == 0 { + return nil, fmt.Errorf("missing key for ca bundle") + } + caContentName := fmt.Sprintf("%s::%s::%s::%s", purpose, namespace, name, key) + + // we construct our own informer because we need such a small subset of the information available. Just one namespace. + uncastConfigmapInformer := corev1informers.NewFilteredConfigMapInformer(kubeClient, namespace, 12*time.Hour, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(listOptions *v1.ListOptions) { + listOptions.FieldSelector = fields.OneTermEqualSelector("metadata.name", name).String() + }) + + configmapLister := corev1listers.NewConfigMapLister(uncastConfigmapInformer.GetIndexer()) + + c := &ConfigMapCAController{ + name: caContentName, + configmapNamespace: namespace, + configmapName: name, + configmapKey: key, + configmapLister: configmapLister, + configMapInformer: uncastConfigmapInformer, + + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicConfigMapCABundle-%s", purpose)}, + ), + preRunCaches: []cache.InformerSynced{uncastConfigmapInformer.HasSynced}, + } + + uncastConfigmapInformer.AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: func(obj interface{}) bool { + if cast, ok := obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok { + if cast, ok := tombstone.Obj.(*corev1.ConfigMap); ok { + return cast.Name == c.configmapName && cast.Namespace == c.configmapNamespace + } + } + return true // always return true just in case. The checks are fairly cheap + }, + Handler: cache.ResourceEventHandlerFuncs{ + // we have a filter, so any time we're called, we may as well queue. We only ever check one configmap + // so we don't have to be choosy about our key. + AddFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + c.queue.Add(c.keyFn()) + }, + DeleteFunc: func(obj interface{}) { + c.queue.Add(c.keyFn()) + }, + }, + }) + + return c, nil +} + +func (c *ConfigMapCAController) keyFn() string { + // this format matches DeletionHandlingMetaNamespaceKeyFunc for our single key + return c.configmapNamespace + "/" + c.configmapName +} + +// AddListener adds a listener to be notified when the CA content changes. +func (c *ConfigMapCAController) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadCABundle determines the next set of content for the file. +func (c *ConfigMapCAController) loadCABundle() error { + configMap, err := c.configmapLister.ConfigMaps(c.configmapNamespace).Get(c.configmapName) + if err != nil { + return err + } + caBundle := configMap.Data[c.configmapKey] + if len(caBundle) == 0 { + return fmt.Errorf("missing content for CA bundle %q", c.Name()) + } + + // check to see if we have a change. If the values are the same, do nothing. 
+	if !c.hasCAChanged([]byte(caBundle)) {
+		return nil
+	}
+
+	caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), []byte(caBundle))
+	if err != nil {
+		return err
+	}
+	c.caBundle.Store(caBundleAndVerifier)
+
+	for _, listener := range c.listeners {
+		listener.Enqueue()
+	}
+
+	return nil
+}
+
+// hasCAChanged returns true if the caBundle is different than the current.
+func (c *ConfigMapCAController) hasCAChanged(caBundle []byte) bool {
+	uncastExisting := c.caBundle.Load()
+	if uncastExisting == nil {
+		return true
+	}
+
+	// check to see if we have a change. If the values are the same, do nothing.
+	existing, ok := uncastExisting.(*caBundleAndVerifier)
+	if !ok {
+		return true
+	}
+	if !bytes.Equal(existing.caBundle, caBundle) {
+		return true
+	}
+
+	return false
+}
+
+// RunOnce runs a single sync loop
+func (c *ConfigMapCAController) RunOnce(ctx context.Context) error {
+	// Ignore the error when running once: when using a dynamically loaded CA file, it's better to have
+	// nothing for a brief time than to crash completely. If crashing is necessary, higher-order logic
+	// like a healthcheck can cause the failure.
+	_ = c.loadCABundle()
+	return nil
+}
+
+// Run starts the controller and blocks until the context is cancelled.
+func (c *ConfigMapCAController) Run(ctx context.Context, workers int) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.InfoS("Starting controller", "name", c.name)
+	defer klog.InfoS("Shutting down controller", "name", c.name)
+
+	// we have a personal informer that is narrowly scoped, start it.
+	go c.configMapInformer.Run(ctx.Done())
+
+	// wait for your secondary caches to fill before starting your work
+	if !cache.WaitForNamedCacheSync(c.name, ctx.Done(), c.preRunCaches...) {
+		return
+	}
+
+	// doesn't matter what workers say, only start one.
+	go wait.Until(c.runWorker, time.Second, ctx.Done())
+
+	// start timer that rechecks every minute, just in case. this also serves to prime the controller quickly.
+	go wait.PollImmediateUntil(FileRefreshDuration, func() (bool, error) {
+		c.queue.Add(workItemKey)
+		return false, nil
+	}, ctx.Done())
+
+	<-ctx.Done()
+}
+
+func (c *ConfigMapCAController) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *ConfigMapCAController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.loadCABundle()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}

+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// Name is just an identifier
+func (c *ConfigMapCAController) Name() string {
+	return c.name
+}
+
+// CurrentCABundleContent provides ca bundle byte content
+func (c *ConfigMapCAController) CurrentCABundleContent() []byte {
+	uncastObj := c.caBundle.Load()
+	if uncastObj == nil {
+		return nil // this can happen if we've been unable to load data from the apiserver for some reason
+	}
+
+	return c.caBundle.Load().(*caBundleAndVerifier).caBundle
+}
+
+// VerifyOptions provides verify options compatible with authenticators
+func (c *ConfigMapCAController) VerifyOptions() (x509.VerifyOptions, bool) {
+	uncastObj := c.caBundle.Load()
+	if uncastObj == nil {
+		// This can happen if we've been unable to load data from the apiserver for some reason.
+		// In this case, we should not accept any connections on the basis of this ca bundle.
+		return x509.VerifyOptions{}, false
+	}
+
+	return uncastObj.(*caBundleAndVerifier).verifyOptions, true
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go
new file mode 100644
index 00000000..1f32adf9
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_cafile_content.go
@@ -0,0 +1,293 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamiccertificates
+
+import (
+	"bytes"
+	"context"
+	"crypto/x509"
+	"fmt"
+	"os"
+	"sync/atomic"
+	"time"
+
+	"github.com/fsnotify/fsnotify"
+	"k8s.io/client-go/util/cert"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
+)
+
+// FileRefreshDuration is exposed so that integration tests can crank up the reload speed.
+var FileRefreshDuration = 1 * time.Minute
+
+// ControllerRunner is a generic interface for starting a controller
+type ControllerRunner interface {
+	// RunOnce runs the sync loop a single time. This is useful for synchronous priming.
+	RunOnce(ctx context.Context) error
+
+	// Run should be invoked in a goroutine, i.e. go c.Run(ctx, workers).
+	Run(ctx context.Context, workers int)
+}
+
+// DynamicFileCAContent provides a CAContentProvider that can dynamically react to new file content.
+// It also fulfills the authenticator interface to provide VerifyOptions.
+type DynamicFileCAContent struct {
+	name string
+
+	// filename is the name of the file to read.
+	filename string
+
+	// caBundle is a caBundleAndVerifier that contains the last read, non-zero length content of the file
+	caBundle atomic.Value
+
+	listeners []Listener
+
+	// queue only ever has one item, but it has nice error handling backoff/retry semantics
+	queue workqueue.TypedRateLimitingInterface[string]
+}
+
+var _ Notifier = &DynamicFileCAContent{}
+var _ CAContentProvider = &DynamicFileCAContent{}
+var _ ControllerRunner = &DynamicFileCAContent{}
+
+type caBundleAndVerifier struct {
+	caBundle      []byte
+	verifyOptions x509.VerifyOptions
+}
+
+// NewDynamicCAContentFromFile returns a CAContentProvider based on a filename that automatically reloads content
+func NewDynamicCAContentFromFile(purpose, filename string) (*DynamicFileCAContent, error) {
+	if len(filename) == 0 {
+		return nil, fmt.Errorf("missing filename for ca bundle")
+	}
+	name := fmt.Sprintf("%s::%s", purpose, filename)
+
+	ret := &DynamicFileCAContent{
+		name:     name,
+		filename: filename,
+		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
+			workqueue.DefaultTypedControllerRateLimiter[string](),
+			workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)},
+		),
+	}
+	if err := ret.loadCABundle(); err != nil {
+		return nil, err
+	}
+
+	return ret, nil
+}
+
+// AddListener adds a listener to be notified when the CA content changes.
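Consumers react to reloads by implementing the single-method Listener interface and registering through AddListener (shown next). A sketch in which a change simply re-queues a key on the consumer's own workqueue (the queue and key are illustrative):

package sketch

import (
	"k8s.io/apiserver/pkg/server/dynamiccertificates"
	"k8s.io/client-go/util/workqueue"
)

// requeueListener satisfies dynamiccertificates.Listener.
type requeueListener struct {
	queue workqueue.TypedRateLimitingInterface[string]
}

// Enqueue is invoked by the provider whenever the CA bundle changes.
func (l *requeueListener) Enqueue() { l.queue.Add("ca-rotated") }

func watchCA(ca *dynamiccertificates.DynamicFileCAContent, q workqueue.TypedRateLimitingInterface[string]) {
	ca.AddListener(&requeueListener{queue: q})
}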
+func (c *DynamicFileCAContent) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadCABundle determines the next set of content for the file. +func (c *DynamicFileCAContent) loadCABundle() error { + caBundle, err := os.ReadFile(c.filename) + if err != nil { + return err + } + if len(caBundle) == 0 { + return fmt.Errorf("missing content for CA bundle %q", c.Name()) + } + + // check to see if we have a change. If the values are the same, do nothing. + if !c.hasCAChanged(caBundle) { + return nil + } + + caBundleAndVerifier, err := newCABundleAndVerifier(c.Name(), caBundle) + if err != nil { + return err + } + c.caBundle.Store(caBundleAndVerifier) + klog.V(2).InfoS("Loaded a new CA Bundle and Verifier", "name", c.Name()) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// hasCAChanged returns true if the caBundle is different than the current. +func (c *DynamicFileCAContent) hasCAChanged(caBundle []byte) bool { + uncastExisting := c.caBundle.Load() + if uncastExisting == nil { + return true + } + + // check to see if we have a change. If the values are the same, do nothing. + existing, ok := uncastExisting.(*caBundleAndVerifier) + if !ok { + return true + } + if !bytes.Equal(existing.caBundle, caBundle) { + return true + } + + return false +} + +// RunOnce runs a single sync loop +func (c *DynamicFileCAContent) RunOnce(ctx context.Context) error { + return c.loadCABundle() +} + +// Run starts the controller and blocks until stopCh is closed. +func (c *DynamicFileCAContent) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.InfoS("Starting controller", "name", c.name) + defer klog.InfoS("Shutting down controller", "name", c.name) + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, ctx.Done()) + + // start the loop that watches the CA file until stopCh is closed. + go wait.Until(func() { + if err := c.watchCAFile(ctx.Done()); err != nil { + klog.ErrorS(err, "Failed to watch CA file, will retry later") + } + }, time.Minute, ctx.Done()) + + <-ctx.Done() +} + +func (c *DynamicFileCAContent) watchCAFile(stopCh <-chan struct{}) error { + // Trigger a check here to ensure the content will be checked periodically even if the following watch fails. + c.queue.Add(workItemKey) + + w, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("error creating fsnotify watcher: %v", err) + } + defer w.Close() + + if err = w.Add(c.filename); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", c.filename, err) + } + // Trigger a check in case the file is updated before the watch starts. + c.queue.Add(workItemKey) + + for { + select { + case e := <-w.Events: + if err := c.handleWatchEvent(e, w); err != nil { + return err + } + case err := <-w.Errors: + return fmt.Errorf("received fsnotify error: %v", err) + case <-stopCh: + return nil + } + } +} + +// handleWatchEvent triggers reloading the CA file, and restarts a new watch if it's a Remove or Rename event. +func (c *DynamicFileCAContent) handleWatchEvent(e fsnotify.Event, w *fsnotify.Watcher) error { + // This should be executed after restarting the watch (if applicable) to ensure no file event will be missing. 
+	defer c.queue.Add(workItemKey)
+	if !e.Has(fsnotify.Remove) && !e.Has(fsnotify.Rename) {
+		return nil
+	}
+	if err := w.Remove(c.filename); err != nil {
+		klog.InfoS("Failed to remove file watch, it may have been deleted", "file", c.filename, "err", err)
+	}
+	if err := w.Add(c.filename); err != nil {
+		return fmt.Errorf("error adding watch for file %s: %v", c.filename, err)
+	}
+	return nil
+}
+
+func (c *DynamicFileCAContent) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *DynamicFileCAContent) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.loadCABundle()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// Name is just an identifier
+func (c *DynamicFileCAContent) Name() string {
+	return c.name
+}
+
+// CurrentCABundleContent provides ca bundle byte content
+func (c *DynamicFileCAContent) CurrentCABundleContent() (cabundle []byte) {
+	return c.caBundle.Load().(*caBundleAndVerifier).caBundle
+}
+
+// VerifyOptions provides verify options compatible with authenticators
+func (c *DynamicFileCAContent) VerifyOptions() (x509.VerifyOptions, bool) {
+	uncastObj := c.caBundle.Load()
+	if uncastObj == nil {
+		return x509.VerifyOptions{}, false
+	}
+
+	return uncastObj.(*caBundleAndVerifier).verifyOptions, true
+}
+
+// newCABundleAndVerifier wraps the given CA bundle bytes together with matching x509 verify options.
+// It returns an error if the bundle is empty or cannot be parsed into a certificate pool.
+func newCABundleAndVerifier(name string, caBundle []byte) (*caBundleAndVerifier, error) {
+	if len(caBundle) == 0 {
+		return nil, fmt.Errorf("missing content for CA bundle %q", name)
+	}
+
+	// Wrap with an x509 verifier
+	var err error
+	verifyOptions := defaultVerifyOptions()
+	verifyOptions.Roots, err = cert.NewPoolFromBytes(caBundle)
+	if err != nil {
+		return nil, fmt.Errorf("error loading CA bundle for %q: %v", name, err)
+	}
+
+	return &caBundleAndVerifier{
+		caBundle:      caBundle,
+		verifyOptions: verifyOptions,
+	}, nil
+}
+
+// defaultVerifyOptions returns VerifyOptions that use the system root certificates, current time,
+// and requires certificates to be valid for client auth (x509.ExtKeyUsageClientAuth)
+func defaultVerifyOptions() x509.VerifyOptions {
+	return x509.VerifyOptions{
+		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+	}
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go
new file mode 100644
index 00000000..e0dd0474
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_serving_content.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package dynamiccertificates + +import ( + "context" + "crypto/tls" + "fmt" + "os" + "sync/atomic" + "time" + + "github.com/fsnotify/fsnotify" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +// DynamicCertKeyPairContent provides a CertKeyContentProvider that can dynamically react to new file content +type DynamicCertKeyPairContent struct { + name string + + // certFile is the name of the certificate file to read. + certFile string + // keyFile is the name of the key file to read. + keyFile string + + // certKeyPair is a certKeyContent that contains the last read, non-zero length content of the key and cert + certKeyPair atomic.Value + + listeners []Listener + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.TypedRateLimitingInterface[string] +} + +var _ CertKeyContentProvider = &DynamicCertKeyPairContent{} +var _ ControllerRunner = &DynamicCertKeyPairContent{} + +// NewDynamicServingContentFromFiles returns a dynamic CertKeyContentProvider based on a cert and key filename +func NewDynamicServingContentFromFiles(purpose, certFile, keyFile string) (*DynamicCertKeyPairContent, error) { + if len(certFile) == 0 || len(keyFile) == 0 { + return nil, fmt.Errorf("missing filename for serving cert") + } + name := fmt.Sprintf("%s::%s::%s", purpose, certFile, keyFile) + + ret := &DynamicCertKeyPairContent{ + name: name, + certFile: certFile, + keyFile: keyFile, + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("DynamicCABundle-%s", purpose)}, + ), + } + if err := ret.loadCertKeyPair(); err != nil { + return nil, err + } + + return ret, nil +} + +// AddListener adds a listener to be notified when the serving cert content changes. +func (c *DynamicCertKeyPairContent) AddListener(listener Listener) { + c.listeners = append(c.listeners, listener) +} + +// loadCertKeyPair determines the next set of content for the file. +func (c *DynamicCertKeyPairContent) loadCertKeyPair() error { + cert, err := os.ReadFile(c.certFile) + if err != nil { + return err + } + key, err := os.ReadFile(c.keyFile) + if err != nil { + return err + } + if len(cert) == 0 || len(key) == 0 { + return fmt.Errorf("missing content for serving cert %q", c.Name()) + } + + // Ensure that the key matches the cert and both are valid + _, err = tls.X509KeyPair(cert, key) + if err != nil { + return err + } + + newCertKey := &certKeyContent{ + cert: cert, + key: key, + } + + // check to see if we have a change. If the values are the same, do nothing. + existing, ok := c.certKeyPair.Load().(*certKeyContent) + if ok && existing != nil && existing.Equal(newCertKey) { + return nil + } + + c.certKeyPair.Store(newCertKey) + klog.V(2).InfoS("Loaded a new cert/key pair", "name", c.Name()) + + for _, listener := range c.listeners { + listener.Enqueue() + } + + return nil +} + +// RunOnce runs a single sync loop +func (c *DynamicCertKeyPairContent) RunOnce(ctx context.Context) error { + return c.loadCertKeyPair() +} + +// Run starts the controller and blocks until context is killed. 
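A sketch of the typical lifecycle for this provider: construct it (which validates and primes the pair), then let Run keep it fresh. The file paths are illustrative; pairing the provider with a tls.Config is sketched after the SNI provider below.

package sketch

import (
	"context"

	"k8s.io/apiserver/pkg/server/dynamiccertificates"
)

func startServingCert(ctx context.Context) (*dynamiccertificates.DynamicCertKeyPairContent, error) {
	// The constructor fails fast if the pair is missing or mismatched.
	content, err := dynamiccertificates.NewDynamicServingContentFromFiles(
		"serving-cert", "/etc/certs/tls.crt", "/etc/certs/tls.key") // illustrative paths
	if err != nil {
		return nil, err
	}
	go content.Run(ctx, 1) // watch the files and reload on rotation
	return content, nil
}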
+func (c *DynamicCertKeyPairContent) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.InfoS("Starting controller", "name", c.name) + defer klog.InfoS("Shutting down controller", "name", c.name) + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, ctx.Done()) + + // start the loop that watches the cert and key files until stopCh is closed. + go wait.Until(func() { + if err := c.watchCertKeyFile(ctx.Done()); err != nil { + klog.ErrorS(err, "Failed to watch cert and key file, will retry later") + } + }, time.Minute, ctx.Done()) + + <-ctx.Done() +} + +func (c *DynamicCertKeyPairContent) watchCertKeyFile(stopCh <-chan struct{}) error { + // Trigger a check here to ensure the content will be checked periodically even if the following watch fails. + c.queue.Add(workItemKey) + + w, err := fsnotify.NewWatcher() + if err != nil { + return fmt.Errorf("error creating fsnotify watcher: %v", err) + } + defer w.Close() + + if err := w.Add(c.certFile); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", c.certFile, err) + } + if err := w.Add(c.keyFile); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", c.keyFile, err) + } + // Trigger a check in case the file is updated before the watch starts. + c.queue.Add(workItemKey) + + for { + select { + case e := <-w.Events: + if err := c.handleWatchEvent(e, w); err != nil { + return err + } + case err := <-w.Errors: + return fmt.Errorf("received fsnotify error: %v", err) + case <-stopCh: + return nil + } + } +} + +// handleWatchEvent triggers reloading the cert and key file, and restarts a new watch if it's a Remove or Rename event. +// If one file is updated before the other, the loadCertKeyPair method will catch the mismatch and will not apply the +// change. When an event of the other file is received, it will trigger reloading the files again and the new content +// will be loaded and used. +func (c *DynamicCertKeyPairContent) handleWatchEvent(e fsnotify.Event, w *fsnotify.Watcher) error { + // This should be executed after restarting the watch (if applicable) to ensure no file event will be missing. 
+ defer c.queue.Add(workItemKey) + if !e.Has(fsnotify.Remove) && !e.Has(fsnotify.Rename) { + return nil + } + if err := w.Remove(e.Name); err != nil { + klog.InfoS("Failed to remove file watch, it may have been deleted", "file", e.Name, "err", err) + } + if err := w.Add(e.Name); err != nil { + return fmt.Errorf("error adding watch for file %s: %v", e.Name, err) + } + return nil +} + +func (c *DynamicCertKeyPairContent) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *DynamicCertKeyPairContent) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.loadCertKeyPair() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// Name is just an identifier +func (c *DynamicCertKeyPairContent) Name() string { + return c.name +} + +// CurrentCertKeyContent provides cert and key byte content +func (c *DynamicCertKeyPairContent) CurrentCertKeyContent() ([]byte, []byte) { + certKeyContent := c.certKeyPair.Load().(*certKeyContent) + return certKeyContent.cert, certKeyContent.key +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go new file mode 100644 index 00000000..621fc9ff --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/dynamic_sni_content.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +// DynamicFileSNIContent provides a SNICertKeyContentProvider that can dynamically react to new file content +type DynamicFileSNIContent struct { + *DynamicCertKeyPairContent + sniNames []string +} + +var _ SNICertKeyContentProvider = &DynamicFileSNIContent{} +var _ ControllerRunner = &DynamicFileSNIContent{} + +// NewDynamicSNIContentFromFiles returns a dynamic SNICertKeyContentProvider based on a cert and key filename and explicit names +func NewDynamicSNIContentFromFiles(purpose, certFile, keyFile string, sniNames ...string) (*DynamicFileSNIContent, error) { + servingContent, err := NewDynamicServingContentFromFiles(purpose, certFile, keyFile) + if err != nil { + return nil, err + } + + ret := &DynamicFileSNIContent{ + DynamicCertKeyPairContent: servingContent, + sniNames: sniNames, + } + if err := ret.loadCertKeyPair(); err != nil { + return nil, err + } + + return ret, nil +} + +// SNINames returns explicitly set SNI names for the certificate. These are not dynamic. 
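Tying the serving providers back to the TLS stack: tls.Config's GetCertificate hook can pull the always-current pair from CurrentCertKeyContent on every handshake. A hedged sketch (re-parsing per handshake is done here purely for brevity; a real server would cache the parsed certificate):

package sketch

import (
	"crypto/tls"

	"k8s.io/apiserver/pkg/server/dynamiccertificates"
)

func tlsConfigFor(content dynamiccertificates.CertKeyContentProvider) *tls.Config {
	return &tls.Config{
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			certPEM, keyPEM := content.CurrentCertKeyContent()
			cert, err := tls.X509KeyPair(certPEM, keyPEM)
			if err != nil {
				return nil, err
			}
			return &cert, nil
		},
	}
}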
+func (c *DynamicFileSNIContent) SNINames() []string {
+ return c.sniNames
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/interfaces.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/interfaces.go
new file mode 100644
index 00000000..7811c5d8
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/interfaces.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamiccertificates
+
+import (
+ "crypto/x509"
+)
+
+// Listener is an interface to use to notify interested parties of a change.
+type Listener interface {
+ // Enqueue should be called when an input may have changed
+ Enqueue()
+}
+
+// Notifier is a way to add listeners
+type Notifier interface {
+ // AddListener adds a listener to be notified of potential input changes.
+ // This is a noop on static providers.
+ AddListener(listener Listener)
+}
+
+// CAContentProvider provides ca bundle byte content
+type CAContentProvider interface {
+ Notifier
+
+ // Name is just an identifier.
+ Name() string
+ // CurrentCABundleContent provides ca bundle byte content. Errors can be
+ // contained to the controllers initializing the value. By the time you get
+ // here, you should always be returning a value that won't fail.
+ CurrentCABundleContent() []byte
+ // VerifyOptions provides VerifyOptions for authenticators.
+ VerifyOptions() (x509.VerifyOptions, bool)
+}
+
+// CertKeyContentProvider provides a certificate and matching private key.
+type CertKeyContentProvider interface {
+ Notifier
+
+ // Name is just an identifier.
+ Name() string
+ // CurrentCertKeyContent provides cert and key byte content.
+ CurrentCertKeyContent() ([]byte, []byte)
+}
+
+// SNICertKeyContentProvider provides a certificate and matching private key as
+// well as optional explicit names.
+type SNICertKeyContentProvider interface {
+ Notifier
+
+ CertKeyContentProvider
+ // SNINames provides names used for SNI. May return nil.
+ SNINames() []string
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go
new file mode 100644
index 00000000..7b9637bc
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/named_certificates.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog/v2" + netutils "k8s.io/utils/net" +) + +// BuildNamedCertificates returns a map of *tls.Certificate by name. It's +// suitable for use in tls.Config#NamedCertificates. Returns an error if any of the certs +// is invalid. Returns nil if len(certs) == 0 +func (c *DynamicServingCertificateController) BuildNamedCertificates(sniCerts []sniCertKeyContent) (map[string]*tls.Certificate, error) { + nameToCertificate := map[string]*tls.Certificate{} + byNameExplicit := map[string]*tls.Certificate{} + + // Iterate backwards so that earlier certs take precedence in the names map + for i := len(sniCerts) - 1; i >= 0; i-- { + cert, err := tls.X509KeyPair(sniCerts[i].cert, sniCerts[i].key) + if err != nil { + return nil, fmt.Errorf("invalid SNI cert keypair [%d/%q]: %v", i, c.sniCerts[i].Name(), err) + } + + // error is not possible given above call to X509KeyPair + x509Cert, _ := x509.ParseCertificate(cert.Certificate[0]) + + names := sniCerts[i].sniNames + for _, name := range names { + byNameExplicit[name] = &cert + } + + klog.V(2).InfoS("Loaded SNI cert", "index", i, "certName", c.sniCerts[i].Name(), "certDetail", GetHumanCertDetail(x509Cert)) + if c.eventRecorder != nil { + c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.sniCerts[i].Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "SNICertificateReload", "loaded SNI cert [%d/%q]: %s with explicit names %v", i, c.sniCerts[i].Name(), GetHumanCertDetail(x509Cert), names) + } + + if len(names) == 0 { + names = getCertificateNames(x509Cert) + for _, name := range names { + nameToCertificate[name] = &cert + } + } + } + + // Explicitly set names must override + for k, v := range byNameExplicit { + nameToCertificate[k] = v + } + + return nameToCertificate, nil +} + +// getCertificateNames returns names for an x509.Certificate. The names are +// suitable for use in tls.Config#NamedCertificates. +func getCertificateNames(cert *x509.Certificate) []string { + var names []string + + cn := cert.Subject.CommonName + cnIsIP := netutils.ParseIPSloppy(cn) != nil + cnIsValidDomain := cn == "*" || len(validation.IsDNS1123Subdomain(strings.TrimPrefix(cn, "*."))) == 0 + // don't use the CN if it is a valid IP because our IP serving detection may unexpectedly use it to terminate the connection. + if !cnIsIP && cnIsValidDomain { + names = append(names, cn) + } + names = append(names, cert.DNSNames...) + // intentionally all IPs in the cert are ignored as SNI forbids passing IPs + // to select a cert. Before go 1.6 the tls happily passed IPs as SNI values. + + return names +} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go new file mode 100644 index 00000000..1934ed1d --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/static_content.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" +) + +type staticCAContent struct { + name string + caBundle *caBundleAndVerifier +} + +var _ CAContentProvider = &staticCAContent{} + +// NewStaticCAContent returns a CAContentProvider that always returns the same value +func NewStaticCAContent(name string, caBundle []byte) (CAContentProvider, error) { + caBundleAndVerifier, err := newCABundleAndVerifier(name, caBundle) + if err != nil { + return nil, err + } + + return &staticCAContent{ + name: name, + caBundle: caBundleAndVerifier, + }, nil +} + +// Name is just an identifier +func (c *staticCAContent) Name() string { + return c.name +} + +func (c *staticCAContent) AddListener(Listener) {} + +// CurrentCABundleContent provides ca bundle byte content +func (c *staticCAContent) CurrentCABundleContent() (cabundle []byte) { + return c.caBundle.caBundle +} + +func (c *staticCAContent) VerifyOptions() (x509.VerifyOptions, bool) { + return c.caBundle.verifyOptions, true +} + +type staticCertKeyContent struct { + name string + cert []byte + key []byte +} + +// NewStaticCertKeyContent returns a CertKeyContentProvider that always returns the same value +func NewStaticCertKeyContent(name string, cert, key []byte) (CertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, nil +} + +// Name is just an identifier +func (c *staticCertKeyContent) Name() string { + return c.name +} + +func (c *staticCertKeyContent) AddListener(Listener) {} + +// CurrentCertKeyContent provides cert and key content +func (c *staticCertKeyContent) CurrentCertKeyContent() ([]byte, []byte) { + return c.cert, c.key +} + +type staticSNICertKeyContent struct { + staticCertKeyContent + sniNames []string +} + +// NewStaticSNICertKeyContent returns a SNICertKeyContentProvider that always returns the same value +func NewStaticSNICertKeyContent(name string, cert, key []byte, sniNames ...string) (SNICertKeyContentProvider, error) { + // Ensure that the key matches the cert and both are valid + _, err := tls.X509KeyPair(cert, key) + if err != nil { + return nil, err + } + + return &staticSNICertKeyContent{ + staticCertKeyContent: staticCertKeyContent{ + name: name, + cert: cert, + key: key, + }, + sniNames: sniNames, + }, nil +} + +func (c *staticSNICertKeyContent) SNINames() []string { + return c.sniNames +} + +func (c *staticSNICertKeyContent) AddListener(Listener) {} diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go new file mode 100644 index 00000000..61bd6fde --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/tlsconfig.go @@ -0,0 +1,287 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dynamiccertificates + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net" + "sync/atomic" + "time" + + corev1 "k8s.io/api/core/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/events" + "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +const workItemKey = "key" + +// DynamicServingCertificateController dynamically loads certificates and provides a golang tls compatible dynamic GetCertificate func. +type DynamicServingCertificateController struct { + // baseTLSConfig is the static portion of the tlsConfig for serving to clients. It is copied and the copy is mutated + // based on the dynamic cert state. + baseTLSConfig *tls.Config + + // clientCA provides the very latest content of the ca bundle + clientCA CAContentProvider + // servingCert provides the very latest content of the default serving certificate + servingCert CertKeyContentProvider + // sniCerts are a list of CertKeyContentProvider with associated names used for SNI + sniCerts []SNICertKeyContentProvider + + // currentlyServedContent holds the original bytes that we are serving. This is used to decide if we need to set a + // new atomic value. The types used for efficient TLSConfig preclude using the processed value. + currentlyServedContent *dynamicCertificateContent + // currentServingTLSConfig holds a *tls.Config that will be used to serve requests + currentServingTLSConfig atomic.Value + + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.TypedRateLimitingInterface[string] + eventRecorder events.EventRecorder +} + +var _ Listener = &DynamicServingCertificateController{} + +// NewDynamicServingCertificateController returns a controller that can be used to keep a TLSConfig up to date. +func NewDynamicServingCertificateController( + baseTLSConfig *tls.Config, + clientCA CAContentProvider, + servingCert CertKeyContentProvider, + sniCerts []SNICertKeyContentProvider, + eventRecorder events.EventRecorder, +) *DynamicServingCertificateController { + c := &DynamicServingCertificateController{ + baseTLSConfig: baseTLSConfig, + clientCA: clientCA, + servingCert: servingCert, + sniCerts: sniCerts, + + queue: workqueue.NewTypedRateLimitingQueueWithConfig( + workqueue.DefaultTypedControllerRateLimiter[string](), + workqueue.TypedRateLimitingQueueConfig[string]{Name: "DynamicServingCertificateController"}, + ), + eventRecorder: eventRecorder, + } + + return c +} + +// GetConfigForClient is an implementation of tls.Config.GetConfigForClient +func (c *DynamicServingCertificateController) GetConfigForClient(clientHello *tls.ClientHelloInfo) (*tls.Config, error) { + uncastObj := c.currentServingTLSConfig.Load() + if uncastObj == nil { + return nil, errors.New("dynamiccertificates: configuration not ready") + } + tlsConfig, ok := uncastObj.(*tls.Config) + if !ok { + return nil, errors.New("dynamiccertificates: unexpected config type") + } + + tlsConfigCopy := tlsConfig.Clone() + + // if the client set SNI information, just use our "normal" SNI flow + if len(clientHello.ServerName) > 0 { + return tlsConfigCopy, nil + } + + // if the client didn't set SNI, then we need to inspect the requested IP so that we can choose + // a certificate from our list if we specifically handle that IP. This can happen when an IP is specifically mapped by name. 
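+ // E.g. a client that dials https://10.0.0.1 without SNI lands here; if a cert was
+ // registered in NameToCertificate under the explicit name "10.0.0.1" (illustrative
+ // scenario), the lookup below serves that cert instead of the default serving cert.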
+ host, _, err := net.SplitHostPort(clientHello.Conn.LocalAddr().String())
+ if err != nil {
+ return tlsConfigCopy, nil
+ }
+
+ ipCert, ok := tlsConfigCopy.NameToCertificate[host]
+ if !ok {
+ return tlsConfigCopy, nil
+ }
+ tlsConfigCopy.Certificates = []tls.Certificate{*ipCert}
+ tlsConfigCopy.NameToCertificate = nil
+
+ return tlsConfigCopy, nil
+}
+
+// newTLSContent determines the next set of content for overriding the baseTLSConfig.
+func (c *DynamicServingCertificateController) newTLSContent() (*dynamicCertificateContent, error) {
+ newContent := &dynamicCertificateContent{}
+
+ if c.clientCA != nil {
+ currClientCABundle := c.clientCA.CurrentCABundleContent()
+ // we allow removing all client ca bundles because the server is still secure when this happens. it just means
+ // that there isn't a hint to clients about which client-cert to use. this happens when there is no client-ca
+ // yet known for authentication, which can happen in aggregated apiservers and some kube-apiserver deployment modes.
+ newContent.clientCA = caBundleContent{caBundle: currClientCABundle}
+ }
+
+ if c.servingCert != nil {
+ currServingCert, currServingKey := c.servingCert.CurrentCertKeyContent()
+ if len(currServingCert) == 0 || len(currServingKey) == 0 {
+ return nil, fmt.Errorf("not loading an empty serving certificate from %q", c.servingCert.Name())
+ }
+
+ newContent.servingCert = certKeyContent{cert: currServingCert, key: currServingKey}
+ }
+
+ for i, sniCert := range c.sniCerts {
+ currCert, currKey := sniCert.CurrentCertKeyContent()
+ if len(currCert) == 0 || len(currKey) == 0 {
+ return nil, fmt.Errorf("not loading an empty SNI certificate from %d/%q", i, sniCert.Name())
+ }
+
+ newContent.sniCerts = append(newContent.sniCerts, sniCertKeyContent{certKeyContent: certKeyContent{cert: currCert, key: currKey}, sniNames: sniCert.SNINames()})
+ }
+
+ return newContent, nil
+}
+
+// syncCerts gets newTLSContent; if it has changed from the existing content, the new content is parsed and stored
+// for use in GetConfigForClient.
+func (c *DynamicServingCertificateController) syncCerts() error {
+ newContent, err := c.newTLSContent()
+ if err != nil {
+ return err
+ }
+ // if the content is the same as what we currently have, we can simply skip it. This works because we are single
+ // threaded. If you ever make this multi-threaded, add a lock.
+ if newContent.Equal(c.currentlyServedContent) {
+ return nil
+ }
+
+ // make a shallow copy and override the dynamic pieces which have changed.
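+ // (tls.Config.Clone returns a copy, so the mutations below never touch baseTLSConfig itself.)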
+ newTLSConfigCopy := c.baseTLSConfig.Clone()
+
+ // parse new content to add to TLSConfig
+ if len(newContent.clientCA.caBundle) > 0 {
+ newClientCAPool := x509.NewCertPool()
+ newClientCAs, err := cert.ParseCertsPEM(newContent.clientCA.caBundle)
+ if err != nil {
+ return fmt.Errorf("unable to load client CA file %q: %v", string(newContent.clientCA.caBundle), err)
+ }
+ for i, cert := range newClientCAs {
+ klog.V(2).InfoS("Loaded client CA", "index", i, "certName", c.clientCA.Name(), "certDetail", GetHumanCertDetail(cert))
+ if c.eventRecorder != nil {
+ c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.clientCA.Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "CACertificateReload", "loaded client CA [%d/%q]: %s", i, c.clientCA.Name(), GetHumanCertDetail(cert))
+ }
+
+ newClientCAPool.AddCert(cert)
+ }
+
+ newTLSConfigCopy.ClientCAs = newClientCAPool
+ }
+
+ if len(newContent.servingCert.cert) > 0 && len(newContent.servingCert.key) > 0 {
+ cert, err := tls.X509KeyPair(newContent.servingCert.cert, newContent.servingCert.key)
+ if err != nil {
+ return fmt.Errorf("invalid serving cert keypair: %v", err)
+ }
+
+ x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
+ if err != nil {
+ return fmt.Errorf("invalid serving cert: %v", err)
+ }
+
+ klog.V(2).InfoS("Loaded serving cert", "certName", c.servingCert.Name(), "certDetail", GetHumanCertDetail(x509Cert))
+ if c.eventRecorder != nil {
+ c.eventRecorder.Eventf(&corev1.ObjectReference{Name: c.servingCert.Name()}, nil, corev1.EventTypeWarning, "TLSConfigChanged", "ServingCertificateReload", "loaded serving cert [%q]: %s", c.servingCert.Name(), GetHumanCertDetail(x509Cert))
+ }
+
+ newTLSConfigCopy.Certificates = []tls.Certificate{cert}
+ }
+
+ if len(newContent.sniCerts) > 0 {
+ newTLSConfigCopy.NameToCertificate, err = c.BuildNamedCertificates(newContent.sniCerts)
+ if err != nil {
+ return fmt.Errorf("unable to build named certificate map: %v", err)
+ }
+
+ // append all named certs. Otherwise, the go tls stack will think no SNI processing
+ // is necessary because there is only one cert anyway.
+ // Moreover, if servingCert is not set, the first SNI
+ // cert will become the default cert. That's what we expect anyway.
+ for _, sniCert := range newTLSConfigCopy.NameToCertificate {
+ newTLSConfigCopy.Certificates = append(newTLSConfigCopy.Certificates, *sniCert)
+ }
+ }
+
+ // store new values of content for serving.
+ c.currentServingTLSConfig.Store(newTLSConfigCopy)
+ c.currentlyServedContent = newContent // this is single threaded, so we have no locking issue
+
+ return nil
+}
+
+// RunOnce runs a single sync step to ensure that we have a valid starting configuration.
+func (c *DynamicServingCertificateController) RunOnce() error {
+ return c.syncCerts()
+}
+
+// Run starts the controller and blocks until stopCh is closed.
+func (c *DynamicServingCertificateController) Run(workers int, stopCh <-chan struct{}) {
+ defer utilruntime.HandleCrash()
+ defer c.queue.ShutDown()
+
+ klog.InfoS("Starting DynamicServingCertificateController")
+ defer klog.InfoS("Shutting down DynamicServingCertificateController")
+
+ // synchronously load once. We will trigger again, so ignoring any error is fine
+ _ = c.RunOnce()
+
+ // doesn't matter what workers say, only start one.
+ go wait.Until(c.runWorker, time.Second, stopCh)
+
+ // start timer that rechecks every minute, just in case. this also serves to prime the controller quickly.
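+ // (wait.Until invokes the func immediately and then once per period, which is what
+ // primes the controller right away.)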
+ go wait.Until(func() {
+ c.Enqueue()
+ }, 1*time.Minute, stopCh)
+
+ <-stopCh
+}
+
+func (c *DynamicServingCertificateController) runWorker() {
+ for c.processNextWorkItem() {
+ }
+}
+
+func (c *DynamicServingCertificateController) processNextWorkItem() bool {
+ dsKey, quit := c.queue.Get()
+ if quit {
+ return false
+ }
+ defer c.queue.Done(dsKey)
+
+ err := c.syncCerts()
+ if err == nil {
+ c.queue.Forget(dsKey)
+ return true
+ }
+
+ utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+ c.queue.AddRateLimited(dsKey)
+
+ return true
+}
+
+// Enqueue is a method to allow separate control loops to cause the certificate controller to trigger and read content.
+func (c *DynamicServingCertificateController) Enqueue() {
+ c.queue.Add(workItemKey)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go
new file mode 100644
index 00000000..57622bd3
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/union_content.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamiccertificates
+
+import (
+ "bytes"
+ "context"
+ "crypto/x509"
+ "strings"
+
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
+)
+
+type unionCAContent []CAContentProvider
+
+var _ CAContentProvider = &unionCAContent{}
+var _ ControllerRunner = &unionCAContent{}
+
+// NewUnionCAContentProvider returns a CAContentProvider that is a union of other CAContentProviders
+func NewUnionCAContentProvider(caContentProviders ...CAContentProvider) CAContentProvider {
+ return unionCAContent(caContentProviders)
+}
+
+// Name is just an identifier
+func (c unionCAContent) Name() string {
+ names := []string{}
+ for _, curr := range c {
+ names = append(names, curr.Name())
+ }
+ return strings.Join(names, ",")
+}
+
+// CurrentCABundleContent provides ca bundle byte content
+func (c unionCAContent) CurrentCABundleContent() []byte {
+ caBundles := [][]byte{}
+ for _, curr := range c {
+ if currCABytes := curr.CurrentCABundleContent(); len(currCABytes) > 0 {
+ caBundles = append(caBundles, []byte(strings.TrimSpace(string(currCABytes))))
+ }
+ }
+
+ return bytes.Join(caBundles, []byte("\n"))
+}
+
+// VerifyOptions provides VerifyOptions for authenticators.
+func (c unionCAContent) VerifyOptions() (x509.VerifyOptions, bool) {
+ currCABundle := c.CurrentCABundleContent()
+ if len(currCABundle) == 0 {
+ return x509.VerifyOptions{}, false
+ }
+
+ // TODO make more efficient. This isn't actually used in any of our mainline paths. It's called to build the TLSConfig
+ // TODO on file changes, but the actual authentication runs against the individual items, not the union.
+ ret, err := newCABundleAndVerifier(c.Name(), c.CurrentCABundleContent())
+ if err != nil {
+ // because we're made up of already vetted values, this indicates some kind of coding error
+ panic(err)
+ }
+
+ return ret.verifyOptions, true
+}
+
+// AddListener adds a listener to be notified when the CA content changes.
+func (c unionCAContent) AddListener(listener Listener) {
+ for _, curr := range c {
+ curr.AddListener(listener)
+ }
+}
+
+// RunOnce runs a single sync step on each contained provider that implements ControllerRunner.
+func (c unionCAContent) RunOnce(ctx context.Context) error {
+ errors := []error{}
+ for _, curr := range c {
+ if controller, ok := curr.(ControllerRunner); ok {
+ if err := controller.RunOnce(ctx); err != nil {
+ errors = append(errors, err)
+ }
+ }
+ }
+
+ return utilerrors.NewAggregate(errors)
+}
+
+// Run runs the controller
+func (c unionCAContent) Run(ctx context.Context, workers int) {
+ for _, curr := range c {
+ if controller, ok := curr.(ControllerRunner); ok {
+ go controller.Run(ctx, workers)
+ }
+ }
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go
new file mode 100644
index 00000000..4e55a057
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/server/dynamiccertificates/util.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dynamiccertificates
+
+import (
+ "crypto/x509"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// GetHumanCertDetail is a convenient method for printing compact details of a certificate that helps when debugging
+// kube-apiserver usage of certs.
+func GetHumanCertDetail(certificate *x509.Certificate) string {
+ humanName := certificate.Subject.CommonName
+ signerHumanName := certificate.Issuer.CommonName
+ if certificate.Subject.CommonName == certificate.Issuer.CommonName {
+ signerHumanName = "<self>"
+ }
+
+ usages := []string{}
+ for _, curr := range certificate.ExtKeyUsage {
+ if curr == x509.ExtKeyUsageClientAuth {
+ usages = append(usages, "client")
+ continue
+ }
+ if curr == x509.ExtKeyUsageServerAuth {
+ usages = append(usages, "serving")
+ continue
+ }
+
+ usages = append(usages, fmt.Sprintf("%d", curr))
+ }
+
+ validServingNames := []string{}
+ for _, ip := range certificate.IPAddresses {
+ validServingNames = append(validServingNames, ip.String())
+ }
+ validServingNames = append(validServingNames, certificate.DNSNames...)
+ servingString := "" + if len(validServingNames) > 0 { + servingString = fmt.Sprintf(" validServingFor=[%s]", strings.Join(validServingNames, ",")) + } + + groupString := "" + if len(certificate.Subject.Organization) > 0 { + groupString = fmt.Sprintf(" groups=[%s]", strings.Join(certificate.Subject.Organization, ",")) + } + + return fmt.Sprintf("%q [%s]%s%s issuer=%q (%v to %v (now=%v))", humanName, strings.Join(usages, ","), groupString, servingString, signerHumanName, certificate.NotBefore.UTC(), certificate.NotAfter.UTC(), + time.Now().UTC()) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go new file mode 100644 index 00000000..ce9a3691 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/config.go @@ -0,0 +1,254 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package egressselector + +import ( + "fmt" + "os" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/apis/apiserver" + "k8s.io/apiserver/pkg/apis/apiserver/install" + "k8s.io/apiserver/pkg/apis/apiserver/v1beta1" + "k8s.io/utils/path" + "sigs.k8s.io/yaml" +) + +var cfgScheme = runtime.NewScheme() + +// validEgressSelectorNames contains the set of valid egress selctor names. +var validEgressSelectorNames = sets.NewString("controlplane", "cluster", "etcd") + +func init() { + install.Install(cfgScheme) +} + +// ReadEgressSelectorConfiguration reads the egress selector configuration at the specified path. +// It returns the loaded egress selector configuration if the input file aligns with the required syntax. +// If it does not align with the provided syntax, it returns a default configuration which should function as a no-op. +// It does this by returning a nil configuration, which preserves backward compatibility. +// This works because prior to this there was no egress selector configuration. +// It returns an error if the file did not exist. +func ReadEgressSelectorConfiguration(configFilePath string) (*apiserver.EgressSelectorConfiguration, error) { + if configFilePath == "" { + return nil, nil + } + // a file was provided, so we just read it. 
+ data, err := os.ReadFile(configFilePath)
+ if err != nil {
+ return nil, fmt.Errorf("unable to read egress selector configuration from %q [%v]", configFilePath, err)
+ }
+ var decodedConfig v1beta1.EgressSelectorConfiguration
+ err = yaml.Unmarshal(data, &decodedConfig)
+ if err != nil {
+ // we got an error where the decode wasn't related to a missing type
+ return nil, err
+ }
+ if decodedConfig.Kind != "EgressSelectorConfiguration" {
+ return nil, fmt.Errorf("invalid service configuration object %q", decodedConfig.Kind)
+ }
+ internalConfig := &apiserver.EgressSelectorConfiguration{}
+ if err := cfgScheme.Convert(&decodedConfig, internalConfig, nil); err != nil {
+ // we got an error where the decode wasn't related to a missing type
+ return nil, err
+ }
+ return internalConfig, nil
+}
+
+// ValidateEgressSelectorConfiguration checks the apiserver.EgressSelectorConfiguration for
+// common configuration errors. It will return errors for problems such as configuring mtls/cert
+// settings for a protocol which does not support security. It will also try to catch errors such as
+// incorrect file paths. It will return nil if it does not find anything wrong.
+func ValidateEgressSelectorConfiguration(config *apiserver.EgressSelectorConfiguration) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if config == nil {
+ return allErrs // Treating a nil configuration as valid
+ }
+ for _, service := range config.EgressSelections {
+ fldPath := field.NewPath("service", "connection")
+ switch service.Connection.ProxyProtocol {
+ case apiserver.ProtocolDirect:
+ allErrs = append(allErrs, validateDirectConnection(service.Connection, fldPath)...)
+ case apiserver.ProtocolHTTPConnect:
+ allErrs = append(allErrs, validateHTTPConnectTransport(service.Connection.Transport, fldPath)...)
+ case apiserver.ProtocolGRPC:
+ allErrs = append(allErrs, validateGRPCTransport(service.Connection.Transport, fldPath)...)
+ default:
+ allErrs = append(allErrs, field.NotSupported(
+ fldPath.Child("protocol"),
+ service.Connection.ProxyProtocol,
+ []string{
+ string(apiserver.ProtocolDirect),
+ string(apiserver.ProtocolHTTPConnect),
+ string(apiserver.ProtocolGRPC),
+ }))
+ }
+ }
+
+ seen := sets.String{}
+ for i, service := range config.EgressSelections {
+ canonicalName := strings.ToLower(service.Name)
+ fldPath := field.NewPath("service", "connection")
+ // duplicate names are not allowed
+ if seen.Has(canonicalName) {
+ allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), canonicalName))
+ continue
+ }
+ seen.Insert(canonicalName)
+
+ if !validEgressSelectorNames.Has(canonicalName) {
+ allErrs = append(allErrs, field.NotSupported(fldPath, canonicalName, validEgressSelectorNames.List()))
+ continue
+ }
+ }
+
+ return allErrs
+}
+
+func validateHTTPConnectTransport(transport *apiserver.Transport, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if transport == nil {
+ allErrs = append(allErrs, field.Required(
+ fldPath.Child("transport"),
+ "transport must be set for HTTPConnect"))
+ return allErrs
+ }
+
+ if transport.TCP != nil && transport.UDS != nil {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("tcp"),
+ transport.TCP,
+ "TCP and UDS cannot both be set"))
+ } else if transport.TCP == nil && transport.UDS == nil {
+ allErrs = append(allErrs, field.Required(
+ fldPath.Child("tcp"),
+ "One of TCP or UDS must be set"))
+ } else if transport.TCP != nil {
+ allErrs = append(allErrs, validateTCPConnection(transport.TCP, fldPath)...)
+ } else if transport.UDS != nil {
+ allErrs = append(allErrs, validateUDSConnection(transport.UDS, fldPath)...)
+ }
+ return allErrs
+}
+
+func validateGRPCTransport(transport *apiserver.Transport, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if transport == nil {
+ allErrs = append(allErrs, field.Required(
+ fldPath.Child("transport"),
+ "transport must be set for GRPC"))
+ return allErrs
+ }
+
+ if transport.UDS != nil {
+ allErrs = append(allErrs, validateUDSConnection(transport.UDS, fldPath)...)
+ } else {
+ allErrs = append(allErrs, field.Required(
+ fldPath.Child("uds"),
+ "UDS must be set with GRPC"))
+ }
+ return allErrs
+}
+
+func validateDirectConnection(connection apiserver.Connection, fldPath *field.Path) field.ErrorList {
+ if connection.Transport != nil {
+ return field.ErrorList{field.Invalid(
+ fldPath.Child("transport"),
+ "direct",
+ "Transport config should be absent for direct connect"),
+ }
+ }
+
+ return nil
+}
+
+func validateUDSConnection(udsConfig *apiserver.UDSTransport, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+ if udsConfig.UDSName == "" {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("udsName"),
+ "nil",
+ "UDSName should be present for UDS connections"))
+ }
+ return allErrs
+}
+
+func validateTCPConnection(tcpConfig *apiserver.TCPTransport, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if strings.HasPrefix(tcpConfig.URL, "http://") {
+ if tcpConfig.TLSConfig != nil {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("tlsConfig"),
+ "nil",
+ "TLSConfig config should not be present when using HTTP"))
+ }
+ } else if strings.HasPrefix(tcpConfig.URL, "https://") {
+ return validateTLSConfig(tcpConfig.TLSConfig, fldPath)
+ } else {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("url"),
+ tcpConfig.URL,
+ "supported connection protocols are http:// and https://"))
+ }
+ return allErrs
+}
+
+func validateTLSConfig(tlsConfig *apiserver.TLSConfig, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if tlsConfig == nil {
+ allErrs = append(allErrs, field.Required(
+ fldPath.Child("tlsConfig"),
+ "TLSConfig must be present when using HTTPS"))
+ return allErrs
+ }
+ if tlsConfig.CABundle != "" {
+ if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.CABundle); !exists || err != nil {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("tlsConfig", "caBundle"),
+ tlsConfig.CABundle,
+ "TLS config ca bundle does not exist"))
+ }
+ }
+ if tlsConfig.ClientCert == "" {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("tlsConfig", "clientCert"),
+ "nil",
+ "Using TLS requires clientCert"))
+ } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientCert); !exists || err != nil {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("tlsConfig", "clientCert"),
+ tlsConfig.ClientCert,
+ "TLS client cert does not exist"))
+ }
+ if tlsConfig.ClientKey == "" {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("tlsConfig", "clientKey"),
+ "nil",
+ "Using TLS requires clientKey"))
+ } else if exists, err := path.Exists(path.CheckFollowSymlink, tlsConfig.ClientKey); !exists || err != nil {
+ allErrs = append(allErrs, field.Invalid(
+ fldPath.Child("tlsConfig", "clientKey"),
+ tlsConfig.ClientKey,
+ "TLS client key does not exist"))
+ }
+ return allErrs
+}
diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go
b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go new file mode 100644 index 00000000..a38ef646 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/egress_selector.go @@ -0,0 +1,406 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package egressselector + +import ( + "bufio" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" + + "go.opentelemetry.io/otel/attribute" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/apis/apiserver" + egressmetrics "k8s.io/apiserver/pkg/server/egressselector/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/component-base/tracing" + "k8s.io/klog/v2" + client "sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client" +) + +var directDialer utilnet.DialFunc = http.DefaultTransport.(*http.Transport).DialContext + +func init() { + client.Metrics.RegisterMetrics(legacyregistry.Registerer()) +} + +// EgressSelector is the map of network context type to context dialer, for network egress. +type EgressSelector struct { + egressToDialer map[EgressType]utilnet.DialFunc +} + +// EgressType is an indicator of which egress selection should be used for sending traffic. +// See https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/1281-network-proxy/README.md#network-context +type EgressType int + +const ( + // ControlPlane is the EgressType for traffic intended to go to the control plane. + ControlPlane EgressType = iota + // Etcd is the EgressType for traffic intended to go to Kubernetes persistence store. + Etcd + // Cluster is the EgressType for traffic intended to go to the system being managed by Kubernetes. + Cluster +) + +// NetworkContext is the struct used by Kubernetes API Server to indicate where it intends traffic to be sent. +type NetworkContext struct { + // EgressSelectionName is the unique name of the + // EgressSelectorConfiguration which determines + // the network we route the traffic to. + EgressSelectionName EgressType +} + +// Lookup is the interface to get the dialer function for the network context. +type Lookup func(networkContext NetworkContext) (utilnet.DialFunc, error) + +// String returns the canonical string representation of the egress type +func (s EgressType) String() string { + switch s { + case ControlPlane: + return "controlplane" + case Etcd: + return "etcd" + case Cluster: + return "cluster" + default: + return "invalid" + } +} + +// AsNetworkContext is a helper function to make it easy to get the basic NetworkContext objects. 
+func (s EgressType) AsNetworkContext() NetworkContext { + return NetworkContext{EgressSelectionName: s} +} + +func lookupServiceName(name string) (EgressType, error) { + switch strings.ToLower(name) { + case "controlplane": + return ControlPlane, nil + case "etcd": + return Etcd, nil + case "cluster": + return Cluster, nil + } + return -1, fmt.Errorf("unrecognized service name %s", name) +} + +func tunnelHTTPConnect(proxyConn net.Conn, proxyAddress, addr string) (net.Conn, error) { + fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", addr, "127.0.0.1") + br := bufio.NewReader(proxyConn) + res, err := http.ReadResponse(br, nil) + if err != nil { + proxyConn.Close() + return nil, fmt.Errorf("reading HTTP response from CONNECT to %s via proxy %s failed: %v", + addr, proxyAddress, err) + } + if res.StatusCode != 200 { + proxyConn.Close() + return nil, fmt.Errorf("proxy error from %s while dialing %s, code %d: %v", + proxyAddress, addr, res.StatusCode, res.Status) + } + + // It's safe to discard the bufio.Reader here and return the + // original TCP conn directly because we only use this for + // TLS, and in TLS the client speaks first, so we know there's + // no unbuffered data. But we can double-check. + if br.Buffered() > 0 { + proxyConn.Close() + return nil, fmt.Errorf("unexpected %d bytes of buffered data from CONNECT proxy %q", + br.Buffered(), proxyAddress) + } + return proxyConn, nil +} + +type proxier interface { + // proxy returns a connection to addr. + proxy(ctx context.Context, addr string) (net.Conn, error) +} + +var _ proxier = &httpConnectProxier{} + +type httpConnectProxier struct { + conn net.Conn + proxyAddress string +} + +func (t *httpConnectProxier) proxy(ctx context.Context, addr string) (net.Conn, error) { + return tunnelHTTPConnect(t.conn, t.proxyAddress, addr) +} + +var _ proxier = &grpcProxier{} + +type grpcProxier struct { + tunnel client.Tunnel +} + +func (g *grpcProxier) proxy(ctx context.Context, addr string) (net.Conn, error) { + return g.tunnel.DialContext(ctx, "tcp", addr) +} + +type proxyServerConnector interface { + // connect establishes connection to the proxy server, and returns a + // proxier based on the connection. + // + // The provided Context must be non-nil. The context is used for connecting to the proxy only. + // If the context expires before the connection is complete, an error is returned. + // Once successfully connected to the proxy, any expiration of the context will not affect the connection. + connect(context.Context) (proxier, error) +} + +type tcpHTTPConnectConnector struct { + proxyAddress string + tlsConfig *tls.Config +} + +func (t *tcpHTTPConnectConnector) connect(ctx context.Context) (proxier, error) { + d := tls.Dialer{ + Config: t.tlsConfig, + } + conn, err := d.DialContext(ctx, "tcp", t.proxyAddress) + if err != nil { + return nil, err + } + return &httpConnectProxier{conn: conn, proxyAddress: t.proxyAddress}, nil +} + +type udsHTTPConnectConnector struct { + udsName string +} + +func (u *udsHTTPConnectConnector) connect(ctx context.Context) (proxier, error) { + var d net.Dialer + conn, err := d.DialContext(ctx, "unix", u.udsName) + if err != nil { + return nil, err + } + return &httpConnectProxier{conn: conn, proxyAddress: u.udsName}, nil +} + +type udsGRPCConnector struct { + udsName string +} + +// connect establishes a connection to a proxy over gRPC. +// TODO At the moment, it does not use the provided context. 
+func (u *udsGRPCConnector) connect(_ context.Context) (proxier, error) { + udsName := u.udsName + dialOption := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { + var d net.Dialer + c, err := d.DialContext(ctx, "unix", udsName) + if err != nil { + klog.Errorf("failed to create connection to uds name %s, error: %v", udsName, err) + } + return c, err + }) + + // CreateSingleUseGrpcTunnel() unfortunately couples dial and connection contexts. Because of that, + // we cannot use ctx just for dialing and control the connection lifetime separately. + // See https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/357. + tunnelCtx := context.TODO() + tunnel, err := client.CreateSingleUseGrpcTunnel(tunnelCtx, udsName, dialOption, + grpc.WithBlock(), + grpc.WithReturnConnectionError(), + grpc.WithTimeout(30*time.Second), // matches http.DefaultTransport dial timeout + grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + return nil, err + } + return &grpcProxier{tunnel: tunnel}, nil +} + +type dialerCreator struct { + connector proxyServerConnector + direct bool + options metricsOptions +} + +type metricsOptions struct { + transport string + protocol string +} + +func (d *dialerCreator) createDialer() utilnet.DialFunc { + if d.direct { + return directDialer + } + return func(ctx context.Context, network, addr string) (net.Conn, error) { + ctx, span := tracing.Start(ctx, fmt.Sprintf("Proxy via %s protocol over %s", d.options.protocol, d.options.transport), attribute.String("address", addr)) + defer span.End(500 * time.Millisecond) + start := egressmetrics.Metrics.Clock().Now() + egressmetrics.Metrics.ObserveDialStart(d.options.protocol, d.options.transport) + proxier, err := d.connector.connect(ctx) + if err != nil { + egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageConnect) + return nil, err + } + conn, err := proxier.proxy(ctx, addr) + if err != nil { + egressmetrics.Metrics.ObserveDialFailure(d.options.protocol, d.options.transport, egressmetrics.StageProxy) + return nil, err + } + egressmetrics.Metrics.ObserveDialLatency(egressmetrics.Metrics.Clock().Now().Sub(start), d.options.protocol, d.options.transport) + return conn, nil + } +} + +func getTLSConfig(t *apiserver.TLSConfig) (*tls.Config, error) { + clientCert := t.ClientCert + clientKey := t.ClientKey + caCert := t.CABundle + clientCerts, err := tls.LoadX509KeyPair(clientCert, clientKey) + if err != nil { + return nil, fmt.Errorf("failed to read key pair %s & %s, got %v", clientCert, clientKey, err) + } + certPool := x509.NewCertPool() + if caCert != "" { + certBytes, err := os.ReadFile(caCert) + if err != nil { + return nil, fmt.Errorf("failed to read cert file %s, got %v", caCert, err) + } + ok := certPool.AppendCertsFromPEM(certBytes) + if !ok { + return nil, fmt.Errorf("failed to append CA cert to the cert pool") + } + } else { + // Use host's root CA set instead of providing our own + certPool = nil + } + return &tls.Config{ + Certificates: []tls.Certificate{clientCerts}, + RootCAs: certPool, + }, nil +} + +func getProxyAddress(urlString string) (string, error) { + proxyURL, err := url.Parse(urlString) + if err != nil { + return "", fmt.Errorf("invalid proxy server url %q: %v", urlString, err) + } + return proxyURL.Host, nil +} + +func connectionToDialerCreator(c apiserver.Connection) (*dialerCreator, error) { + switch c.ProxyProtocol { + + case apiserver.ProtocolHTTPConnect: + if c.Transport.UDS != nil { + return 
&dialerCreator{ + connector: &udsHTTPConnectConnector{ + udsName: c.Transport.UDS.UDSName, + }, + options: metricsOptions{ + transport: egressmetrics.TransportUDS, + protocol: egressmetrics.ProtocolHTTPConnect, + }, + }, nil + } else if c.Transport.TCP != nil { + tlsConfig, err := getTLSConfig(c.Transport.TCP.TLSConfig) + if err != nil { + return nil, err + } + proxyAddress, err := getProxyAddress(c.Transport.TCP.URL) + if err != nil { + return nil, err + } + return &dialerCreator{ + connector: &tcpHTTPConnectConnector{ + tlsConfig: tlsConfig, + proxyAddress: proxyAddress, + }, + options: metricsOptions{ + transport: egressmetrics.TransportTCP, + protocol: egressmetrics.ProtocolHTTPConnect, + }, + }, nil + } else { + return nil, fmt.Errorf("Either a TCP or UDS transport must be specified") + } + case apiserver.ProtocolGRPC: + if c.Transport.UDS != nil { + return &dialerCreator{ + connector: &udsGRPCConnector{ + udsName: c.Transport.UDS.UDSName, + }, + options: metricsOptions{ + transport: egressmetrics.TransportUDS, + protocol: egressmetrics.ProtocolGRPC, + }, + }, nil + } + return nil, fmt.Errorf("UDS transport must be specified for GRPC") + case apiserver.ProtocolDirect: + return &dialerCreator{direct: true}, nil + default: + return nil, fmt.Errorf("unrecognized service connection protocol %q", c.ProxyProtocol) + } + +} + +// NewEgressSelector configures lookup mechanism for Lookup. +// It does so based on a EgressSelectorConfiguration which was read at startup. +func NewEgressSelector(config *apiserver.EgressSelectorConfiguration) (*EgressSelector, error) { + if config == nil || config.EgressSelections == nil { + // No Connection Services configured, leaving the serviceMap empty, will return default dialer. + return nil, nil + } + cs := &EgressSelector{ + egressToDialer: make(map[EgressType]utilnet.DialFunc), + } + for _, service := range config.EgressSelections { + name, err := lookupServiceName(service.Name) + if err != nil { + return nil, err + } + dialerCreator, err := connectionToDialerCreator(service.Connection) + if err != nil { + return nil, fmt.Errorf("failed to create dialer for egressSelection %q: %v", name, err) + } + cs.egressToDialer[name] = dialerCreator.createDialer() + } + return cs, nil +} + +// NewEgressSelectorWithMap returns a EgressSelector with the supplied EgressType to DialFunc map. +func NewEgressSelectorWithMap(m map[EgressType]utilnet.DialFunc) *EgressSelector { + if m == nil { + m = make(map[EgressType]utilnet.DialFunc) + } + return &EgressSelector{ + egressToDialer: m, + } +} + +// Lookup gets the dialer function for the network context. +// This is configured for the Kubernetes API Server at startup. +func (cs *EgressSelector) Lookup(networkContext NetworkContext) (utilnet.DialFunc, error) { + if cs.egressToDialer == nil { + // The round trip wrapper will over-ride the dialContext method appropriately + return nil, nil + } + + return cs.egressToDialer[networkContext.EgressSelectionName], nil +} diff --git a/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go new file mode 100644 index 00000000..2e39947c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/server/egressselector/metrics/metrics.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+ "time"
+
+ "k8s.io/component-base/metrics"
+ "k8s.io/component-base/metrics/legacyregistry"
+ "k8s.io/utils/clock"
+)
+
+const (
+ namespace = "apiserver"
+ subsystem = "egress_dialer"
+
+ // ProtocolHTTPConnect means that the proxy protocol is http-connect.
+ ProtocolHTTPConnect = "http_connect"
+ // ProtocolGRPC means that the proxy protocol is the GRPC protocol.
+ ProtocolGRPC = "grpc"
+ // TransportTCP means that the transport is TCP.
+ TransportTCP = "tcp"
+ // TransportUDS means that the transport is UDS.
+ TransportUDS = "uds"
+ // StageConnect indicates that the dial failed at establishing connection to the proxy server.
+ StageConnect = "connect"
+ // StageProxy indicates that the dial failed at requesting the proxy server to proxy.
+ StageProxy = "proxy"
+)
+
+var (
+ // Use buckets ranging from 5 ms to 12.5 seconds.
+ latencyBuckets = []float64{0.005, 0.025, 0.1, 0.5, 2.5, 12.5}
+
+ // Metrics provides access to all dial metrics.
+ Metrics = newDialMetrics()
+)
+
+// DialMetrics instruments dials to the proxy server with prometheus metrics.
+type DialMetrics struct {
+ clock clock.Clock
+ starts *metrics.CounterVec
+ latencies *metrics.HistogramVec
+ failures *metrics.CounterVec
+}
+
+// newDialMetrics creates a new DialMetrics, configured with default metric names.
+func newDialMetrics() *DialMetrics {
+ starts := metrics.NewCounterVec(
+ &metrics.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "dial_start_total",
+ Help: "Dial starts, labeled by the protocol (http-connect or grpc) and transport (tcp or uds).",
+ StabilityLevel: metrics.ALPHA,
+ },
+ []string{"protocol", "transport"},
+ )
+
+ latencies := metrics.NewHistogramVec(
+ &metrics.HistogramOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "dial_duration_seconds",
+ Help: "Dial latency histogram in seconds, labeled by the protocol (http-connect or grpc), transport (tcp or uds)",
+ Buckets: latencyBuckets,
+ StabilityLevel: metrics.ALPHA,
+ },
+ []string{"protocol", "transport"},
+ )
+
+ failures := metrics.NewCounterVec(
+ &metrics.CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: "dial_failure_count",
+ Help: "Dial failure count, labeled by the protocol (http-connect or grpc), transport (tcp or uds), and stage (connect or proxy). The stage indicates at which stage the dial failed",
+ StabilityLevel: metrics.ALPHA,
+ },
+ []string{"protocol", "transport", "stage"},
+ )
+
+ legacyregistry.MustRegister(starts)
+ legacyregistry.MustRegister(latencies)
+ legacyregistry.MustRegister(failures)
+ return &DialMetrics{starts: starts, latencies: latencies, failures: failures, clock: clock.RealClock{}}
+}
+
+// Clock returns the clock.
+func (m *DialMetrics) Clock() clock.Clock {
+ return m.clock
+}
+
+// SetClock sets the clock.
+func (m *DialMetrics) SetClock(c clock.Clock) {
+ m.clock = c
+}
+
+// Reset resets the metrics.
+func (m *DialMetrics) Reset() {
+ m.starts.Reset()
+ m.latencies.Reset()
+ m.failures.Reset()
+}
+
+// ObserveDialStart records the start of a dial attempt, labeled by protocol, transport.
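+// A typical dial records exactly one start, followed by either one latency observation
+// on success or one failure observation with the failing stage (see dialerCreator.createDialer
+// for the pairing).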
+func (m *DialMetrics) ObserveDialStart(protocol, transport string) {
+ m.starts.WithLabelValues(protocol, transport).Inc()
+}
+
+// ObserveDialLatency records the latency of a dial, labeled by protocol, transport.
+func (m *DialMetrics) ObserveDialLatency(elapsed time.Duration, protocol, transport string) {
+ m.latencies.WithLabelValues(protocol, transport).Observe(elapsed.Seconds())
+}
+
+// ObserveDialFailure records a failed dial, labeled by protocol, transport, and the stage the dial failed at.
+func (m *DialMetrics) ObserveDialFailure(protocol, transport, stage string) {
+ m.failures.WithLabelValues(protocol, transport, stage).Inc()
+}
diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
new file mode 100644
index 00000000..00a9e099
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package feature
+
+import (
+ "k8s.io/component-base/featuregate"
+)
+
+var (
+ // DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate.
+ // Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this.
+ // Tests that need to modify feature gates for the duration of their test should use:
+ // featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.<FeatureName>, <value>)
+ DefaultMutableFeatureGate featuregate.MutableVersionedFeatureGate = featuregate.NewFeatureGate()
+
+ // DefaultFeatureGate is a shared global FeatureGate.
+ // Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate.
+ DefaultFeatureGate featuregate.FeatureGate = DefaultMutableFeatureGate
+)
diff --git a/vendor/k8s.io/apiserver/pkg/util/version/registry.go b/vendor/k8s.io/apiserver/pkg/util/version/registry.go
new file mode 100644
index 00000000..0db9c48a
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/util/version/registry.go
@@ -0,0 +1,454 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package version + +import ( + "fmt" + "sort" + "strings" + "sync" + + "github.com/spf13/pflag" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/version" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/featuregate" + "k8s.io/klog/v2" +) + +// DefaultComponentGlobalsRegistry is the global var to store the effective versions and feature gates for all components for easy access. +// Example usage: +// // register the component effective version and feature gate first +// _, _ = utilversion.DefaultComponentGlobalsRegistry.ComponentGlobalsOrRegister(utilversion.DefaultKubeComponent, utilversion.DefaultKubeEffectiveVersion(), utilfeature.DefaultMutableFeatureGate) +// wardleEffectiveVersion := utilversion.NewEffectiveVersion("1.2") +// wardleFeatureGate := featuregate.NewFeatureGate() +// utilruntime.Must(utilversion.DefaultComponentGlobalsRegistry.Register(apiserver.WardleComponentName, wardleEffectiveVersion, wardleFeatureGate, false)) +// +// cmd := &cobra.Command{ +// ... +// // call DefaultComponentGlobalsRegistry.Set() in PersistentPreRunE +// PersistentPreRunE: func(*cobra.Command, []string) error { +// if err := utilversion.DefaultComponentGlobalsRegistry.Set(); err != nil { +// return err +// } +// ... +// }, +// RunE: func(c *cobra.Command, args []string) error { +// // call utilversion.DefaultComponentGlobalsRegistry.Validate() somewhere +// }, +// } +// +// flags := cmd.Flags() +// // add flags +// utilversion.DefaultComponentGlobalsRegistry.AddFlags(flags) +var DefaultComponentGlobalsRegistry ComponentGlobalsRegistry = NewComponentGlobalsRegistry() + +const ( + DefaultKubeComponent = "kube" + + klogLevel = 2 +) + +type VersionMapping func(from *version.Version) *version.Version + +// ComponentGlobals stores the global variables for a component for easy access. +type ComponentGlobals struct { + effectiveVersion MutableEffectiveVersion + featureGate featuregate.MutableVersionedFeatureGate + + // emulationVersionMapping contains the mapping from the emulation version of this component + // to the emulation version of another component. + emulationVersionMapping map[string]VersionMapping + // dependentEmulationVersion stores whether or not this component's EmulationVersion is dependent through mapping on another component. + // If true, the emulation version cannot be set from the flag, or version mapping from another component. + dependentEmulationVersion bool + // minCompatibilityVersionMapping contains the mapping from the min compatibility version of this component + // to the min compatibility version of another component. + minCompatibilityVersionMapping map[string]VersionMapping + // dependentMinCompatibilityVersion stores whether or not this component's MinCompatibilityVersion is dependent through mapping on another component + // If true, the min compatibility version cannot be set from the flag, or version mapping from another component. + dependentMinCompatibilityVersion bool +} + +type ComponentGlobalsRegistry interface { + // EffectiveVersionFor returns the EffectiveVersion registered under the component. + // Returns nil if the component is not registered. + EffectiveVersionFor(component string) EffectiveVersion + // FeatureGateFor returns the FeatureGate registered under the component. + // Returns nil if the component is not registered. + FeatureGateFor(component string) featuregate.FeatureGate + // Register registers the EffectiveVersion and FeatureGate for a component. 
+	// Returns an error if the component is already registered.
+	Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error
+	// ComponentGlobalsOrRegister returns the registered global variables for the component if it already exists in the registry.
+	// Otherwise, the provided variables are registered under the component, and the same variables are returned.
+	ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate)
+	// AddFlags adds the --emulated-version and --feature-gates flags.
+	AddFlags(fs *pflag.FlagSet)
+	// Set sets the flags for all global variables for all components registered.
+	Set() error
+	// SetFallback calls Set() if it has never been called.
+	SetFallback() error
+	// Validate calls the Validate() function for all the global variables for all components registered.
+	Validate() []error
+	// Reset removes all stored ComponentGlobals, configurations, and version mappings.
+	Reset()
+	// SetEmulationVersionMapping sets the mapping from the emulation version of one component
+	// to the emulation version of another component.
+	// Once set, the emulation version of the toComponent will be determined by the emulation version of the fromComponent,
+	// and cannot be set from cmd flags anymore.
+	// For a given component, its emulation version can only depend on one other component; multiple dependencies are not allowed.
+	SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error
+}
+
+type componentGlobalsRegistry struct {
+	componentGlobals map[string]*ComponentGlobals
+	mutex            sync.RWMutex
+	// list of component name to emulation version set from the flag.
+	emulationVersionConfig []string
+	// map of component name to the list of feature gates set from the flag.
+	featureGatesConfig map[string][]string
+	// set stores whether the Set() function for the registry has already been called.
+ set bool +} + +func NewComponentGlobalsRegistry() *componentGlobalsRegistry { + return &componentGlobalsRegistry{ + componentGlobals: make(map[string]*ComponentGlobals), + emulationVersionConfig: nil, + featureGatesConfig: nil, + } +} + +func (r *componentGlobalsRegistry) Reset() { + r.mutex.Lock() + defer r.mutex.Unlock() + r.componentGlobals = make(map[string]*ComponentGlobals) + r.emulationVersionConfig = nil + r.featureGatesConfig = nil + r.set = false +} + +func (r *componentGlobalsRegistry) EffectiveVersionFor(component string) EffectiveVersion { + r.mutex.RLock() + defer r.mutex.RUnlock() + globals, ok := r.componentGlobals[component] + if !ok { + return nil + } + return globals.effectiveVersion +} + +func (r *componentGlobalsRegistry) FeatureGateFor(component string) featuregate.FeatureGate { + r.mutex.RLock() + defer r.mutex.RUnlock() + globals, ok := r.componentGlobals[component] + if !ok { + return nil + } + return globals.featureGate +} + +func (r *componentGlobalsRegistry) unsafeRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error { + if _, ok := r.componentGlobals[component]; ok { + return fmt.Errorf("component globals of %s already registered", component) + } + if featureGate != nil { + if err := featureGate.SetEmulationVersion(effectiveVersion.EmulationVersion()); err != nil { + return err + } + } + c := ComponentGlobals{ + effectiveVersion: effectiveVersion, + featureGate: featureGate, + emulationVersionMapping: make(map[string]VersionMapping), + minCompatibilityVersionMapping: make(map[string]VersionMapping), + } + r.componentGlobals[component] = &c + return nil +} + +func (r *componentGlobalsRegistry) Register(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) error { + if effectiveVersion == nil { + return fmt.Errorf("cannot register nil effectiveVersion") + } + r.mutex.Lock() + defer r.mutex.Unlock() + return r.unsafeRegister(component, effectiveVersion, featureGate) +} + +func (r *componentGlobalsRegistry) ComponentGlobalsOrRegister(component string, effectiveVersion MutableEffectiveVersion, featureGate featuregate.MutableVersionedFeatureGate) (MutableEffectiveVersion, featuregate.MutableVersionedFeatureGate) { + r.mutex.Lock() + defer r.mutex.Unlock() + globals, ok := r.componentGlobals[component] + if ok { + return globals.effectiveVersion, globals.featureGate + } + utilruntime.Must(r.unsafeRegister(component, effectiveVersion, featureGate)) + return effectiveVersion, featureGate +} + +func (r *componentGlobalsRegistry) unsafeKnownFeatures() []string { + var known []string + for component, globals := range r.componentGlobals { + if globals.featureGate == nil { + continue + } + for _, f := range globals.featureGate.KnownFeatures() { + known = append(known, component+":"+f) + } + } + sort.Strings(known) + return known +} + +func (r *componentGlobalsRegistry) unsafeVersionFlagOptions(isEmulation bool) []string { + var vs []string + for component, globals := range r.componentGlobals { + binaryVer := globals.effectiveVersion.BinaryVersion() + if isEmulation { + if globals.dependentEmulationVersion { + continue + } + // emulated version could be between binaryMajor.{binaryMinor} and binaryMajor.{binaryMinor} + // TODO: change to binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor} in 1.32 + vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component, + binaryVer.SubtractMinor(0).String(), binaryVer.String(), 
globals.effectiveVersion.EmulationVersion().String()))
+		} else {
+			if globals.dependentMinCompatibilityVersion {
+				continue
+			}
+			// min compatibility version could be between binaryMajor.{binaryMinor-1} and binaryMajor.{binaryMinor}
+			vs = append(vs, fmt.Sprintf("%s=%s..%s (default=%s)", component,
+				binaryVer.SubtractMinor(1).String(), binaryVer.String(), globals.effectiveVersion.MinCompatibilityVersion().String()))
+		}
+	}
+	sort.Strings(vs)
+	return vs
+}
+
+func (r *componentGlobalsRegistry) AddFlags(fs *pflag.FlagSet) {
+	if r == nil {
+		return
+	}
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for _, globals := range r.componentGlobals {
+		if globals.featureGate != nil {
+			globals.featureGate.Close()
+		}
+	}
+	if r.emulationVersionConfig != nil || r.featureGatesConfig != nil {
+		klog.Warning("calling componentGlobalsRegistry.AddFlags more than once, the registry will be set by the latest flags")
+	}
+	r.emulationVersionConfig = []string{}
+	r.featureGatesConfig = make(map[string][]string)
+
+	fs.StringSliceVar(&r.emulationVersionConfig, "emulated-version", r.emulationVersionConfig, ""+
+		"The versions whose capabilities (APIs, features, ...) the different components emulate.\n"+
+		"If set, the component will emulate the behavior of this version instead of the underlying binary version.\n"+
+		"The version format can only be major.minor, for example: '--emulated-version=wardle=1.2,kube=1.31'. Options are:\n"+strings.Join(r.unsafeVersionFlagOptions(true), "\n")+"\n"+
+		"If the component is not specified, defaults to \"kube\"")
+
+	fs.Var(cliflag.NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(&r.featureGatesConfig), "feature-gates", "Comma-separated list of component:key=value pairs that describe feature gates for alpha/experimental features of different components.\n"+
+		"If the component is not specified, defaults to \"kube\". This flag can be repeatedly invoked. For example: --feature-gates 'wardle:featureA=true,wardle:featureB=false' --feature-gates 'kube:featureC=true'.\n"+
+		"Options are:\n"+strings.Join(r.unsafeKnownFeatures(), "\n"))
+}
+
+type componentVersion struct {
+	component string
+	ver       *version.Version
+}
+
+// getFullEmulationVersionConfig expands the given version config with the registered version mappings,
+// and returns the map of component to Version.
+func (r *componentGlobalsRegistry) getFullEmulationVersionConfig(
+	versionConfigMap map[string]*version.Version) (map[string]*version.Version, error) {
+	result := map[string]*version.Version{}
+	setQueue := []componentVersion{}
+	for comp, ver := range versionConfigMap {
+		if _, ok := r.componentGlobals[comp]; !ok {
+			return result, fmt.Errorf("component not registered: %s", comp)
+		}
+		klog.V(klogLevel).Infof("setting version %s=%s", comp, ver.String())
+		setQueue = append(setQueue, componentVersion{comp, ver})
+	}
+	for len(setQueue) > 0 {
+		cv := setQueue[0]
+		if _, visited := result[cv.component]; visited {
+			return result, fmt.Errorf("setting version of %s more than once, likely a version mapping loop", cv.component)
+		}
+		setQueue = setQueue[1:]
+		result[cv.component] = cv.ver
+		for toComp, f := range r.componentGlobals[cv.component].emulationVersionMapping {
+			toVer := f(cv.ver)
+			if toVer == nil {
+				return result, fmt.Errorf("got nil version from mapping of %s=%s to component:%s", cv.component, cv.ver.String(), toComp)
+			}
+			klog.V(klogLevel).Infof("setting version %s=%s from version mapping of %s=%s", toComp, toVer.String(), cv.component, cv.ver.String())
+			setQueue = append(setQueue, componentVersion{toComp, toVer})
+		}
+	}
+	return result, nil
+}
+
+func toVersionMap(versionConfig []string) (map[string]*version.Version, error) {
+	m := map[string]*version.Version{}
+	for _, compVer := range versionConfig {
+		// default to "kube" if component is not specified
+		k := "kube"
+		v := compVer
+		if strings.Contains(compVer, "=") {
+			arr := strings.SplitN(compVer, "=", 2)
+			if len(arr) != 2 {
+				return m, fmt.Errorf("malformed pair, expect string=string")
+			}
+			k = strings.TrimSpace(arr[0])
+			v = strings.TrimSpace(arr[1])
+		}
+		ver, err := version.Parse(v)
+		if err != nil {
+			return m, err
+		}
+		if ver.Patch() != 0 {
+			return m, fmt.Errorf("patch version not allowed, got: %s=%s", k, ver.String())
+		}
+		if existingVer, ok := m[k]; ok {
+			return m, fmt.Errorf("duplicate version flag, %s=%s and %s=%s", k, existingVer.String(), k, ver.String())
+		}
+		m[k] = ver
+	}
+	return m, nil
+}
+
+func (r *componentGlobalsRegistry) SetFallback() error {
+	r.mutex.Lock()
+	set := r.set
+	r.mutex.Unlock()
+	if set {
+		return nil
+	}
+	klog.Warning("setting componentGlobalsRegistry in SetFallback. We recommend calling componentGlobalsRegistry.Set()" +
+		" right after parsing flags to avoid using feature gates before their final values are set by the flags.")
+	return r.Set()
+}
+
+func (r *componentGlobalsRegistry) Set() error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.set = true
+	emulationVersionConfigMap, err := toVersionMap(r.emulationVersionConfig)
+	if err != nil {
+		return err
+	}
+	for comp := range emulationVersionConfigMap {
+		if _, ok := r.componentGlobals[comp]; !ok {
+			return fmt.Errorf("component not registered: %s", comp)
+		}
+		// only components without any dependencies can be set from the flag.
+		if r.componentGlobals[comp].dependentEmulationVersion {
+			return fmt.Errorf("EmulationVersion of %s is set by mapping, cannot set it by flag", comp)
+		}
+	}
+	if emulationVersions, err := r.getFullEmulationVersionConfig(emulationVersionConfigMap); err != nil {
+		return err
+	} else {
+		for comp, ver := range emulationVersions {
+			r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver)
+		}
+	}
+	// Set feature gate emulation version before setting feature gate flag values.
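To make the accepted --emulated-version formats concrete, here is a test-style sketch of toVersionMap's behavior implied by the parsing rules above (same package; illustrative, not upstream code):

	func exampleToVersionMapFormats() {
		m, _ := toVersionMap([]string{"wardle=1.2", "kube=1.31"})
		fmt.Println(m["wardle"], m["kube"]) // 1.2 1.31

		m, _ = toVersionMap([]string{"1.31"}) // a bare version defaults to the "kube" component
		fmt.Println(m["kube"])                // 1.31

		if _, err := toVersionMap([]string{"kube=1.31.2"}); err != nil {
			fmt.Println(err) // patch version not allowed
		}
		if _, err := toVersionMap([]string{"kube=1.31", "1.30"}); err != nil {
			fmt.Println(err) // duplicate version flag for "kube"
		}
	}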
+ for comp, globals := range r.componentGlobals { + if globals.featureGate == nil { + continue + } + klog.V(klogLevel).Infof("setting %s:feature gate emulation version to %s", comp, globals.effectiveVersion.EmulationVersion().String()) + if err := globals.featureGate.SetEmulationVersion(globals.effectiveVersion.EmulationVersion()); err != nil { + return err + } + } + for comp, fg := range r.featureGatesConfig { + if comp == "" { + if _, ok := r.featureGatesConfig[DefaultKubeComponent]; ok { + return fmt.Errorf("set kube feature gates with default empty prefix or kube: prefix consistently, do not mix use") + } + comp = DefaultKubeComponent + } + if _, ok := r.componentGlobals[comp]; !ok { + return fmt.Errorf("component not registered: %s", comp) + } + featureGate := r.componentGlobals[comp].featureGate + if featureGate == nil { + return fmt.Errorf("component featureGate not registered: %s", comp) + } + flagVal := strings.Join(fg, ",") + klog.V(klogLevel).Infof("setting %s:feature-gates=%s", comp, flagVal) + if err := featureGate.Set(flagVal); err != nil { + return err + } + } + return nil +} + +func (r *componentGlobalsRegistry) Validate() []error { + var errs []error + r.mutex.Lock() + defer r.mutex.Unlock() + for _, globals := range r.componentGlobals { + errs = append(errs, globals.effectiveVersion.Validate()...) + if globals.featureGate != nil { + errs = append(errs, globals.featureGate.Validate()...) + } + } + return errs +} + +func (r *componentGlobalsRegistry) SetEmulationVersionMapping(fromComponent, toComponent string, f VersionMapping) error { + if f == nil { + return nil + } + klog.V(klogLevel).Infof("setting EmulationVersion mapping from %s to %s", fromComponent, toComponent) + r.mutex.Lock() + defer r.mutex.Unlock() + if _, ok := r.componentGlobals[fromComponent]; !ok { + return fmt.Errorf("component not registered: %s", fromComponent) + } + if _, ok := r.componentGlobals[toComponent]; !ok { + return fmt.Errorf("component not registered: %s", toComponent) + } + // check multiple dependency + if r.componentGlobals[toComponent].dependentEmulationVersion { + return fmt.Errorf("mapping of %s already exists from another component", toComponent) + } + r.componentGlobals[toComponent].dependentEmulationVersion = true + + versionMapping := r.componentGlobals[fromComponent].emulationVersionMapping + if _, ok := versionMapping[toComponent]; ok { + return fmt.Errorf("EmulationVersion from %s to %s already exists", fromComponent, toComponent) + } + versionMapping[toComponent] = f + klog.V(klogLevel).Infof("setting the default EmulationVersion of %s based on mapping from the default EmulationVersion of %s", fromComponent, toComponent) + defaultFromVersion := r.componentGlobals[fromComponent].effectiveVersion.EmulationVersion() + emulationVersions, err := r.getFullEmulationVersionConfig(map[string]*version.Version{fromComponent: defaultFromVersion}) + if err != nil { + return err + } + for comp, ver := range emulationVersions { + r.componentGlobals[comp].effectiveVersion.SetEmulationVersion(ver) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/version/version.go b/vendor/k8s.io/apiserver/pkg/util/version/version.go new file mode 100644 index 00000000..694d27a9 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/version/version.go @@ -0,0 +1,181 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "sync/atomic" + + "k8s.io/apimachinery/pkg/util/version" + baseversion "k8s.io/component-base/version" +) + +type EffectiveVersion interface { + BinaryVersion() *version.Version + EmulationVersion() *version.Version + MinCompatibilityVersion() *version.Version + EqualTo(other EffectiveVersion) bool + String() string + Validate() []error +} + +type MutableEffectiveVersion interface { + EffectiveVersion + Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) + SetEmulationVersion(emulationVersion *version.Version) + SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) +} + +type effectiveVersion struct { + // When true, BinaryVersion() returns the current binary version + useDefaultBuildBinaryVersion atomic.Bool + // Holds the last binary version stored in Set() + binaryVersion atomic.Pointer[version.Version] + // If the emulationVersion is set by the users, it could only contain major and minor versions. + // In tests, emulationVersion could be the same as the binary version, or set directly, + // which can have "alpha" as pre-release to continue serving expired apis while we clean up the test. + emulationVersion atomic.Pointer[version.Version] + // minCompatibilityVersion could only contain major and minor versions. + minCompatibilityVersion atomic.Pointer[version.Version] +} + +func (m *effectiveVersion) BinaryVersion() *version.Version { + if m.useDefaultBuildBinaryVersion.Load() { + return defaultBuildBinaryVersion() + } + return m.binaryVersion.Load() +} + +func (m *effectiveVersion) EmulationVersion() *version.Version { + ver := m.emulationVersion.Load() + if ver != nil { + // Emulation version can have "alpha" as pre-release to continue serving expired apis while we clean up the test. + // The pre-release should not be accessible to the users. 
+ return ver.WithPreRelease(m.BinaryVersion().PreRelease()) + } + return ver +} + +func (m *effectiveVersion) MinCompatibilityVersion() *version.Version { + return m.minCompatibilityVersion.Load() +} + +func (m *effectiveVersion) EqualTo(other EffectiveVersion) bool { + return m.BinaryVersion().EqualTo(other.BinaryVersion()) && m.EmulationVersion().EqualTo(other.EmulationVersion()) && m.MinCompatibilityVersion().EqualTo(other.MinCompatibilityVersion()) +} + +func (m *effectiveVersion) String() string { + if m == nil { + return "" + } + return fmt.Sprintf("{BinaryVersion: %s, EmulationVersion: %s, MinCompatibilityVersion: %s}", + m.BinaryVersion().String(), m.EmulationVersion().String(), m.MinCompatibilityVersion().String()) +} + +func majorMinor(ver *version.Version) *version.Version { + if ver == nil { + return ver + } + return version.MajorMinor(ver.Major(), ver.Minor()) +} + +func (m *effectiveVersion) Set(binaryVersion, emulationVersion, minCompatibilityVersion *version.Version) { + m.binaryVersion.Store(binaryVersion) + m.useDefaultBuildBinaryVersion.Store(false) + m.emulationVersion.Store(majorMinor(emulationVersion)) + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) SetEmulationVersion(emulationVersion *version.Version) { + m.emulationVersion.Store(majorMinor(emulationVersion)) +} + +func (m *effectiveVersion) SetMinCompatibilityVersion(minCompatibilityVersion *version.Version) { + m.minCompatibilityVersion.Store(majorMinor(minCompatibilityVersion)) +} + +func (m *effectiveVersion) Validate() []error { + var errs []error + // Validate only checks the major and minor versions. + binaryVersion := m.BinaryVersion().WithPatch(0) + emulationVersion := m.emulationVersion.Load() + minCompatibilityVersion := m.minCompatibilityVersion.Load() + + // emulationVersion can only be 1.{binaryMinor-1}...1.{binaryMinor}. + maxEmuVer := binaryVersion + minEmuVer := binaryVersion.SubtractMinor(1) + if emulationVersion.GreaterThan(maxEmuVer) || emulationVersion.LessThan(minEmuVer) { + errs = append(errs, fmt.Errorf("emulation version %s is not between [%s, %s]", emulationVersion.String(), minEmuVer.String(), maxEmuVer.String())) + } + // minCompatibilityVersion can only be 1.{binaryMinor-1} for alpha. + maxCompVer := binaryVersion.SubtractMinor(1) + minCompVer := binaryVersion.SubtractMinor(1) + if minCompatibilityVersion.GreaterThan(maxCompVer) || minCompatibilityVersion.LessThan(minCompVer) { + errs = append(errs, fmt.Errorf("minCompatibilityVersion version %s is not between [%s, %s]", minCompatibilityVersion.String(), minCompVer.String(), maxCompVer.String())) + } + return errs +} + +func newEffectiveVersion(binaryVersion *version.Version, useDefaultBuildBinaryVersion bool) MutableEffectiveVersion { + effective := &effectiveVersion{} + compatVersion := binaryVersion.SubtractMinor(1) + effective.Set(binaryVersion, binaryVersion, compatVersion) + effective.useDefaultBuildBinaryVersion.Store(useDefaultBuildBinaryVersion) + return effective +} + +func NewEffectiveVersion(binaryVer string) MutableEffectiveVersion { + if binaryVer == "" { + return &effectiveVersion{} + } + binaryVersion := version.MustParse(binaryVer) + return newEffectiveVersion(binaryVersion, false) +} + +func defaultBuildBinaryVersion() *version.Version { + verInfo := baseversion.Get() + return version.MustParse(verInfo.String()).WithInfo(verInfo) +} + +// DefaultBuildEffectiveVersion returns the MutableEffectiveVersion based on the +// current build information. 
+func DefaultBuildEffectiveVersion() MutableEffectiveVersion { + binaryVersion := defaultBuildBinaryVersion() + if binaryVersion.Major() == 0 && binaryVersion.Minor() == 0 { + return DefaultKubeEffectiveVersion() + } + return newEffectiveVersion(binaryVersion, true) +} + +// DefaultKubeEffectiveVersion returns the MutableEffectiveVersion based on the +// latest K8s release. +func DefaultKubeEffectiveVersion() MutableEffectiveVersion { + binaryVersion := version.MustParse(baseversion.DefaultKubeBinaryVersion).WithInfo(baseversion.Get()) + return newEffectiveVersion(binaryVersion, false) +} + +// ValidateKubeEffectiveVersion validates the EmulationVersion is equal to the binary version at 1.31 for kube components. +// TODO: remove in 1.32 +// emulationVersion is introduced in 1.31, so it is only allowed to be equal to the binary version at 1.31. +func ValidateKubeEffectiveVersion(effectiveVersion EffectiveVersion) error { + binaryVersion := version.MajorMinor(effectiveVersion.BinaryVersion().Major(), effectiveVersion.BinaryVersion().Minor()) + if binaryVersion.EqualTo(version.MajorMinor(1, 31)) && !effectiveVersion.EmulationVersion().EqualTo(binaryVersion) { + return fmt.Errorf("emulation version needs to be equal to binary version(%s) in compatibility-version alpha, got %s", + binaryVersion.String(), effectiveVersion.EmulationVersion().String()) + } + return nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go new file mode 100644 index 00000000..0816b45a --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/authentication.go @@ -0,0 +1,276 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "net" + "net/http" + "os" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel/trace" + + corev1 "k8s.io/api/core/v1" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apiserver/pkg/features" + egressselector "k8s.io/apiserver/pkg/server/egressselector" + "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + tracing "k8s.io/component-base/tracing" +) + +// AuthenticationInfoResolverWrapper can be used to inject Dial function to the +// rest.Config generated by the resolver. 
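The EffectiveVersion machinery above composes as in this minimal sketch (the wardle component name and versions are illustrative):

	import (
		"k8s.io/apimachinery/pkg/util/version"
		utilversion "k8s.io/apiserver/pkg/util/version"
	)

	// newWardleEffectiveVersion builds an effective version for a hypothetical
	// component binary at 1.2 that emulates the previous minor release.
	func newWardleEffectiveVersion() (utilversion.MutableEffectiveVersion, []error) {
		v := utilversion.NewEffectiveVersion("1.2")     // binary 1.2; min compatibility defaults to 1.1
		v.SetEmulationVersion(version.MajorMinor(1, 1)) // allowed range is [1.1, 1.2]
		return v, v.Validate()                          // an empty slice means the combination is valid
	}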
+type AuthenticationInfoResolverWrapper func(AuthenticationInfoResolver) AuthenticationInfoResolver
+
+// NewDefaultAuthenticationInfoResolverWrapper builds a default authn resolver wrapper.
+func NewDefaultAuthenticationInfoResolverWrapper(
+	proxyTransport *http.Transport,
+	egressSelector *egressselector.EgressSelector,
+	kubeapiserverClientConfig *rest.Config,
+	tp trace.TracerProvider) AuthenticationInfoResolverWrapper {
+
+	webhookAuthResolverWrapper := func(delegate AuthenticationInfoResolver) AuthenticationInfoResolver {
+		return &AuthenticationInfoResolverDelegator{
+			ClientConfigForFunc: func(hostPort string) (*rest.Config, error) {
+				if hostPort == "kubernetes.default.svc:443" {
+					return kubeapiserverClientConfig, nil
+				}
+				ret, err := delegate.ClientConfigFor(hostPort)
+				if err != nil {
+					return nil, err
+				}
+				if feature.DefaultFeatureGate.Enabled(features.APIServerTracing) {
+					ret.Wrap(tracing.WrapperFor(tp))
+				}
+
+				if egressSelector != nil {
+					networkContext := egressselector.ControlPlane.AsNetworkContext()
+					var egressDialer utilnet.DialFunc
+					egressDialer, err = egressSelector.Lookup(networkContext)
+
+					if err != nil {
+						return nil, err
+					}
+
+					ret.Dial = egressDialer
+				}
+				return ret, nil
+			},
+			ClientConfigForServiceFunc: func(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) {
+				if serviceName == "kubernetes" && serviceNamespace == corev1.NamespaceDefault && servicePort == 443 {
+					return kubeapiserverClientConfig, nil
+				}
+				ret, err := delegate.ClientConfigForService(serviceName, serviceNamespace, servicePort)
+				if err != nil {
+					return nil, err
+				}
+				if feature.DefaultFeatureGate.Enabled(features.APIServerTracing) {
+					ret.Wrap(tracing.WrapperFor(tp))
+				}
+
+				if egressSelector != nil {
+					networkContext := egressselector.Cluster.AsNetworkContext()
+					var egressDialer utilnet.DialFunc
+					egressDialer, err = egressSelector.Lookup(networkContext)
+					if err != nil {
+						return nil, err
+					}
+
+					ret.Dial = egressDialer
+				} else if proxyTransport != nil && proxyTransport.DialContext != nil {
+					ret.Dial = proxyTransport.DialContext
+				}
+				return ret, nil
+			},
+		}
+	}
+	return webhookAuthResolverWrapper
+}
+
+// AuthenticationInfoResolver builds rest.Config based on the server or service
+// name and service namespace.
+type AuthenticationInfoResolver interface {
+	// ClientConfigFor builds rest.Config based on the hostPort.
+	ClientConfigFor(hostPort string) (*rest.Config, error)
+	// ClientConfigForService builds rest.Config based on the serviceName and
+	// serviceNamespace.
+	ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error)
+}
+
+// AuthenticationInfoResolverDelegator implements AuthenticationInfoResolver.
+type AuthenticationInfoResolverDelegator struct {
+	ClientConfigForFunc        func(hostPort string) (*rest.Config, error)
+	ClientConfigForServiceFunc func(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error)
+}
+
+// ClientConfigFor returns the client config for the given hostPort.
+func (a *AuthenticationInfoResolverDelegator) ClientConfigFor(hostPort string) (*rest.Config, error) {
+	return a.ClientConfigForFunc(hostPort)
+}
+
+// ClientConfigForService returns the client config for the given service.
+func (a *AuthenticationInfoResolverDelegator) ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) { + return a.ClientConfigForServiceFunc(serviceName, serviceNamespace, servicePort) +} + +type defaultAuthenticationInfoResolver struct { + kubeconfig clientcmdapi.Config +} + +// NewDefaultAuthenticationInfoResolver generates an AuthenticationInfoResolver +// that builds rest.Config based on the kubeconfig file. kubeconfigFile is the +// path to the kubeconfig. +func NewDefaultAuthenticationInfoResolver(kubeconfigFile string) (AuthenticationInfoResolver, error) { + if len(kubeconfigFile) == 0 { + return &defaultAuthenticationInfoResolver{}, nil + } + + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + loadingRules.ExplicitPath = kubeconfigFile + loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + clientConfig, err := loader.RawConfig() + if err != nil { + return nil, err + } + + return &defaultAuthenticationInfoResolver{kubeconfig: clientConfig}, nil +} + +func (c *defaultAuthenticationInfoResolver) ClientConfigFor(hostPort string) (*rest.Config, error) { + return c.clientConfig(hostPort) +} + +func (c *defaultAuthenticationInfoResolver) ClientConfigForService(serviceName, serviceNamespace string, servicePort int) (*rest.Config, error) { + return c.clientConfig(net.JoinHostPort(serviceName+"."+serviceNamespace+".svc", strconv.Itoa(servicePort))) +} + +func (c *defaultAuthenticationInfoResolver) clientConfig(target string) (*rest.Config, error) { + // exact match + if authConfig, ok := c.kubeconfig.AuthInfos[target]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // star prefixed match + serverSteps := strings.Split(target, ".") + for i := 1; i < len(serverSteps); i++ { + nickName := "*." + strings.Join(serverSteps[i:], ".") + if authConfig, ok := c.kubeconfig.AuthInfos[nickName]; ok { + return restConfigFromKubeconfig(authConfig) + } + } + + // If target included the default https port (443), search again without the port + if target, port, err := net.SplitHostPort(target); err == nil && port == "443" { + // exact match without port + if authConfig, ok := c.kubeconfig.AuthInfos[target]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // star prefixed match without port + serverSteps := strings.Split(target, ".") + for i := 1; i < len(serverSteps); i++ { + nickName := "*." + strings.Join(serverSteps[i:], ".") + if authConfig, ok := c.kubeconfig.AuthInfos[nickName]; ok { + return restConfigFromKubeconfig(authConfig) + } + } + } + + // if we're trying to hit the kube-apiserver and there wasn't an explicit config, use the in-cluster config + if target == "kubernetes.default.svc:443" { + // if we can find an in-cluster-config use that. If we can't, fall through. 
+ inClusterConfig, err := rest.InClusterConfig() + if err == nil { + return setGlobalDefaults(inClusterConfig), nil + } + } + + // star (default) match + if authConfig, ok := c.kubeconfig.AuthInfos["*"]; ok { + return restConfigFromKubeconfig(authConfig) + } + + // use the current context from the kubeconfig if possible + if len(c.kubeconfig.CurrentContext) > 0 { + if currContext, ok := c.kubeconfig.Contexts[c.kubeconfig.CurrentContext]; ok { + if len(currContext.AuthInfo) > 0 { + if currAuth, ok := c.kubeconfig.AuthInfos[currContext.AuthInfo]; ok { + return restConfigFromKubeconfig(currAuth) + } + } + } + } + + // anonymous + return setGlobalDefaults(&rest.Config{}), nil +} + +func restConfigFromKubeconfig(configAuthInfo *clientcmdapi.AuthInfo) (*rest.Config, error) { + config := &rest.Config{} + + // blindly overwrite existing values based on precedence + if len(configAuthInfo.Token) > 0 { + config.BearerToken = configAuthInfo.Token + config.BearerTokenFile = configAuthInfo.TokenFile + } else if len(configAuthInfo.TokenFile) > 0 { + tokenBytes, err := os.ReadFile(configAuthInfo.TokenFile) + if err != nil { + return nil, err + } + config.BearerToken = string(tokenBytes) + config.BearerTokenFile = configAuthInfo.TokenFile + } + if len(configAuthInfo.Impersonate) > 0 { + config.Impersonate = rest.ImpersonationConfig{ + UserName: configAuthInfo.Impersonate, + UID: configAuthInfo.ImpersonateUID, + Groups: configAuthInfo.ImpersonateGroups, + Extra: configAuthInfo.ImpersonateUserExtra, + } + } + if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 { + config.CertFile = configAuthInfo.ClientCertificate + config.CertData = configAuthInfo.ClientCertificateData + config.KeyFile = configAuthInfo.ClientKey + config.KeyData = configAuthInfo.ClientKeyData + } + if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 { + config.Username = configAuthInfo.Username + config.Password = configAuthInfo.Password + } + if configAuthInfo.Exec != nil { + config.ExecProvider = configAuthInfo.Exec.DeepCopy() + } + if configAuthInfo.AuthProvider != nil { + return nil, fmt.Errorf("auth provider not supported") + } + + return setGlobalDefaults(config), nil +} + +func setGlobalDefaults(config *rest.Config) *rest.Config { + config.UserAgent = "kube-apiserver-admission" + config.Timeout = 30 * time.Second + + return config +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/client.go b/vendor/k8s.io/apiserver/pkg/util/webhook/client.go new file mode 100644 index 00000000..63ea4e26 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/client.go @@ -0,0 +1,257 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
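For the kubeconfig-based resolver above, the lookup order can be summarized by the AuthInfo keys it tries; a sketch for a hypothetical target (hostnames are examples only):

	// Keys consulted for target "mysvc.myns.svc:8443" before falling back:
	var authInfoCandidates = []string{
		"mysvc.myns.svc:8443", // exact host:port match
		"*.myns.svc:8443",     // star-prefixed matches, dropping leading labels
		"*.svc:8443",
		// (a default :443 port would additionally be retried with the port stripped)
		"*", // catch-all entry
		// then: the kubeconfig's current-context credentials, else an anonymous config;
		// "kubernetes.default.svc:443" is special-cased to the in-cluster config.
	}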
+*/
+
+package webhook
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apiserver/pkg/util/x509metrics"
+	"k8s.io/client-go/rest"
+	"k8s.io/utils/lru"
+	netutils "k8s.io/utils/net"
+)
+
+const (
+	defaultCacheSize = 200
+)
+
+// ClientConfig defines parameters required for creating a hook client.
+type ClientConfig struct {
+	Name     string
+	URL      string
+	CABundle []byte
+	Service  *ClientConfigService
+}
+
+// ClientConfigService defines service discovery parameters of the webhook.
+type ClientConfigService struct {
+	Name      string
+	Namespace string
+	Path      string
+	Port      int32
+}
+
+// ClientManager builds REST clients to talk to webhooks. It caches the clients
+// to avoid duplicate creation.
+type ClientManager struct {
+	authInfoResolver     AuthenticationInfoResolver
+	serviceResolver      ServiceResolver
+	negotiatedSerializer runtime.NegotiatedSerializer
+	cache                *lru.Cache
+}
+
+// NewClientManager creates a ClientManager.
+func NewClientManager(gvs []schema.GroupVersion, addToSchemaFuncs ...func(s *runtime.Scheme) error) (ClientManager, error) {
+	cache := lru.New(defaultCacheSize)
+	hookScheme := runtime.NewScheme()
+	for _, addToSchemaFunc := range addToSchemaFuncs {
+		if err := addToSchemaFunc(hookScheme); err != nil {
+			return ClientManager{}, err
+		}
+	}
+	return ClientManager{
+		cache: cache,
+		negotiatedSerializer: serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{
+			Serializer: serializer.NewCodecFactory(hookScheme).LegacyCodec(gvs...),
+		}),
+	}, nil
+}
+
+// SetAuthenticationInfoResolverWrapper sets the
+// AuthenticationInfoResolverWrapper.
+func (cm *ClientManager) SetAuthenticationInfoResolverWrapper(wrapper AuthenticationInfoResolverWrapper) {
+	if wrapper != nil {
+		cm.authInfoResolver = wrapper(cm.authInfoResolver)
+	}
+}
+
+// SetAuthenticationInfoResolver sets the AuthenticationInfoResolver.
+func (cm *ClientManager) SetAuthenticationInfoResolver(resolver AuthenticationInfoResolver) {
+	cm.authInfoResolver = resolver
+}
+
+// SetServiceResolver sets the ServiceResolver.
+func (cm *ClientManager) SetServiceResolver(sr ServiceResolver) {
+	if sr != nil {
+		cm.serviceResolver = sr
+	}
+}
+
+// Validate checks if ClientManager is properly set up.
+func (cm *ClientManager) Validate() error {
+	var errs []error
+	if cm.negotiatedSerializer == nil {
+		errs = append(errs, fmt.Errorf("the clientManager requires a negotiatedSerializer"))
+	}
+	if cm.serviceResolver == nil {
+		errs = append(errs, fmt.Errorf("the clientManager requires a serviceResolver"))
+	}
+	if cm.authInfoResolver == nil {
+		errs = append(errs, fmt.Errorf("the clientManager requires an authInfoResolver"))
+	}
+	return utilerrors.NewAggregate(errs)
+}
+
+// HookClient gets a RESTClient from the cache, or constructs one based on the
+// webhook configuration.
+func (cm *ClientManager) HookClient(cc ClientConfig) (*rest.RESTClient, error) { + ccWithNoName := cc + ccWithNoName.Name = "" + cacheKey, err := json.Marshal(ccWithNoName) + if err != nil { + return nil, err + } + if client, ok := cm.cache.Get(string(cacheKey)); ok { + return client.(*rest.RESTClient), nil + } + + cfg, err := cm.hookClientConfig(cc) + if err != nil { + return nil, err + } + + client, err := rest.UnversionedRESTClientFor(cfg) + if err == nil { + cm.cache.Add(string(cacheKey), client) + } + return client, err +} + +func (cm *ClientManager) hookClientConfig(cc ClientConfig) (*rest.Config, error) { + complete := func(cfg *rest.Config) (*rest.Config, error) { + // Avoid client-side rate limiting talking to the webhook backend. + // Rate limiting should happen when deciding how many requests to serve. + cfg.QPS = -1 + + // Combine CAData from the config with any existing CA bundle provided + if len(cfg.TLSClientConfig.CAData) > 0 { + cfg.TLSClientConfig.CAData = append(cfg.TLSClientConfig.CAData, '\n') + } + cfg.TLSClientConfig.CAData = append(cfg.TLSClientConfig.CAData, cc.CABundle...) + + cfg.ContentConfig.NegotiatedSerializer = cm.negotiatedSerializer + cfg.ContentConfig.ContentType = runtime.ContentTypeJSON + + // Add a transport wrapper that allows detection of TLS connections to + // servers with serving certificates with deprecated characteristics + cfg.Wrap(x509metrics.NewDeprecatedCertificateRoundTripperWrapperConstructor( + x509MissingSANCounter, + x509InsecureSHA1Counter, + )) + return cfg, nil + } + + if cc.Service != nil { + port := cc.Service.Port + if port == 0 { + // Default to port 443 if no service port is specified + port = 443 + } + + restConfig, err := cm.authInfoResolver.ClientConfigForService(cc.Service.Name, cc.Service.Namespace, int(port)) + if err != nil { + return nil, err + } + cfg := rest.CopyConfig(restConfig) + + // Use http/1.1 instead of http/2. + // This is a workaround for http/2-enabled clients not load-balancing concurrent requests to multiple backends. + // See https://issue.k8s.io/75791 for details. + cfg.NextProtos = []string{"http/1.1"} + + serverName := cc.Service.Name + "." 
+ cc.Service.Namespace + ".svc" + + host := net.JoinHostPort(serverName, strconv.Itoa(int(port))) + cfg.Host = "https://" + host + cfg.APIPath = cc.Service.Path + // Set the server name if not already set + if len(cfg.TLSClientConfig.ServerName) == 0 { + cfg.TLSClientConfig.ServerName = serverName + } + + delegateDialer := cfg.Dial + if delegateDialer == nil { + var d net.Dialer + delegateDialer = d.DialContext + } + cfg.Dial = func(ctx context.Context, network, addr string) (net.Conn, error) { + if addr == host { + u, err := cm.serviceResolver.ResolveEndpoint(cc.Service.Namespace, cc.Service.Name, port) + if err != nil { + return nil, err + } + addr = u.Host + } + return delegateDialer(ctx, network, addr) + } + + return complete(cfg) + } + + if cc.URL == "" { + return nil, &ErrCallingWebhook{WebhookName: cc.Name, Reason: errors.New("webhook configuration must have either service or URL")} + } + + u, err := url.Parse(cc.URL) + if err != nil { + return nil, &ErrCallingWebhook{WebhookName: cc.Name, Reason: fmt.Errorf("Unparsable URL: %v", err)} + } + + hostPort := u.Host + if len(u.Port()) == 0 { + // Default to port 443 if no port is specified + hostPort = net.JoinHostPort(hostPort, "443") + } + + restConfig, err := cm.authInfoResolver.ClientConfigFor(hostPort) + if err != nil { + return nil, err + } + + cfg := rest.CopyConfig(restConfig) + cfg.Host = u.Scheme + "://" + u.Host + cfg.APIPath = u.Path + if !isLocalHost(u) { + cfg.NextProtos = []string{"http/1.1"} + } + + return complete(cfg) +} + +func isLocalHost(u *url.URL) bool { + host := u.Hostname() + if strings.EqualFold(host, "localhost") { + return true + } + + netIP := netutils.ParseIPSloppy(host) + if netIP != nil { + return netIP.IsLoopback() + } + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/error.go b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go new file mode 100644 index 00000000..3c5d39ae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/error.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +// ErrCallingWebhook is returned for transport-layer errors calling webhooks. It +// represents a failure to talk to the webhook, not the webhook rejecting a +// request. +type ErrCallingWebhook struct { + WebhookName string + Reason error + Status *apierrors.StatusError +} + +func (e *ErrCallingWebhook) Error() string { + if e.Reason != nil { + return fmt.Sprintf("failed calling webhook %q: %v", e.WebhookName, e.Reason) + } + return fmt.Sprintf("failed calling webhook %q; no further details available", e.WebhookName) +} + +// ErrWebhookRejection represents a webhook properly rejecting a request. 
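Tying the pieces of client.go together, a caller might assemble a ClientManager roughly like this (the admission/v1 group is one plausible choice of payload scheme, not mandated by this file):

	import (
		admissionv1 "k8s.io/api/admission/v1"
		"k8s.io/apimachinery/pkg/runtime/schema"
		"k8s.io/apiserver/pkg/util/webhook"
	)

	func newHookClientManager(auth webhook.AuthenticationInfoResolver) (webhook.ClientManager, error) {
		cm, err := webhook.NewClientManager(
			[]schema.GroupVersion{admissionv1.SchemeGroupVersion},
			admissionv1.AddToScheme,
		)
		if err != nil {
			return webhook.ClientManager{}, err
		}
		cm.SetAuthenticationInfoResolver(auth)
		cm.SetServiceResolver(webhook.NewDefaultServiceResolver())
		// Validate confirms the serializer and both resolvers are set before use.
		return cm, cm.Validate()
	}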
+type ErrWebhookRejection struct { + Status *apierrors.StatusError +} + +func (e *ErrWebhookRejection) Error() string { + return e.Status.Error() +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh b/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh new file mode 100644 index 00000000..a6426a95 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/gencerts.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# gencerts.sh generates the certificates for the webhook tests. +# +# It is not expected to be run often (there is no go generate rule), and mainly +# exists for documentation purposes. + +CN_BASE="webhook_tests" + +cat > intermediate_ca.conf << EOF +[ v3_ca ] +subjectKeyIdentifier=hash +authorityKeyIdentifier=keyid:always,issuer +basicConstraints = critical,CA:true +keyUsage = cRLSign, keyCertSign +EOF + +cat > server.conf << EOF +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth, serverAuth +subjectAltName = @alt_names +[alt_names] +IP.1 = 127.0.0.1 +DNS.1 = localhost +EOF + +cat > server_no_san.conf << EOF +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth, serverAuth +EOF + +cat > client.conf << EOF +[req] +req_extensions = v3_req +distinguished_name = req_distinguished_name +[req_distinguished_name] +[ v3_req ] +basicConstraints = CA:FALSE +keyUsage = nonRepudiation, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth, serverAuth +subjectAltName = @alt_names +[alt_names] +IP.1 = 127.0.0.1 +EOF + +# Create a certificate authority +openssl genrsa -out caKey.pem 2048 +openssl req -x509 -new -nodes -key caKey.pem -days 100000 -out caCert.pem -subj "/CN=${CN_BASE}_ca" + +# Create a second certificate authority +openssl genrsa -out badCAKey.pem 2048 +openssl req -x509 -new -nodes -key badCAKey.pem -days 100000 -out badCACert.pem -subj "/CN=${CN_BASE}_ca" + +# Create an intermediate certificate authority +openssl genrsa -out caKeyInter.pem 2048 +openssl req -new -nodes -key caKeyInter.pem -days 100000 -out caCertInter.csr -subj "/CN=${CN_BASE}_intermediate_ca" +openssl x509 -req -in caCertInter.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out caCertInter.pem -days 100000 -extensions v3_ca -extfile intermediate_ca.conf + +# Create an intermediate certificate authority with sha1 signature +openssl req -new -nodes -key caKeyInter.pem -days 100000 -out caCertInterSHA1.csr -subj "/CN=${CN_BASE}_intermediate_ca" +openssl x509 -sha1 -req -in caCertInterSHA1.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out caCertInterSHA1.pem -days 100000 -extensions v3_ca -extfile intermediate_ca.conf + 
+# Create a server certificate
+openssl genrsa -out serverKey.pem 2048
+openssl req -new -key serverKey.pem -out server.csr -subj "/CN=${CN_BASE}_server" -config server.conf
+openssl x509 -req -in server.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCert.pem -days 100000 -extensions v3_req -extfile server.conf
+
+# Create a server certificate w/o SAN
+openssl req -new -key serverKey.pem -out serverNoSAN.csr -subj "/CN=localhost" -config server_no_san.conf
+openssl x509 -req -in serverNoSAN.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCertNoSAN.pem -days 100000 -extensions v3_req -extfile server_no_san.conf
+
+# Create a server certificate with SHA1 signature signed by OK intermediate CA
+openssl req -new -key serverKey.pem -out serverSHA1.csr -subj "/CN=localhost" -config server.conf
+openssl x509 -sha1 -req -in serverSHA1.csr -CA caCertInter.pem -CAkey caKeyInter.pem -CAcreateserial -out sha1ServerCertInter.pem -days 100000 -extensions v3_req -extfile server.conf
+
+# Create a server certificate signed by SHA1-signed intermediate CA
+openssl req -new -key serverKey.pem -out serverInterSHA1.csr -subj "/CN=localhost" -config server.conf
+openssl x509 -req -in serverInterSHA1.csr -CA caCertInterSHA1.pem -CAkey caKeyInter.pem -CAcreateserial -out serverCertInterSHA1.pem -days 100000 -extensions v3_req -extfile server.conf
+
+# Create a client certificate
+openssl genrsa -out clientKey.pem 2048
+openssl req -new -key clientKey.pem -out client.csr -subj "/CN=${CN_BASE}_client" -config client.conf
+openssl x509 -req -in client.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out clientCert.pem -days 100000 -extensions v3_req -extfile client.conf
+
+outfile=certs_test.go
+
+cat > $outfile << EOF
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was generated using openssl by the gencerts.sh script
+// and holds raw certificates for the webhook tests.
+
+package webhook
+EOF
+
+for file in caKey caCert badCAKey badCACert caCertInter caCertInterSHA1 serverKey serverCert serverCertNoSAN clientKey clientCert sha1ServerCertInter serverCertInterSHA1; do
+    data=$(cat ${file}.pem)
+    echo "" >> $outfile
+    echo "var $file = []byte(\`$data\`)" >> $outfile
+done
+
+# Clean up after we're done.
+rm ./*.pem
+rm ./*.csr
+rm ./*.srl
+rm ./*.conf
diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/metrics.go b/vendor/k8s.io/apiserver/pkg/util/webhook/metrics.go
new file mode 100644
index 00000000..67fc8fab
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/util/webhook/metrics.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+var x509MissingSANCounter = metrics.NewCounter(
+	&metrics.CounterOpts{
+		Subsystem: "webhooks",
+		Namespace: "apiserver",
+		Name:      "x509_missing_san_total",
+		Help: "Counts the number of requests to servers missing SAN extension " +
+			"in their serving certificate OR the number of connection failures " +
+			"due to the lack of the x509 certificate SAN extension " +
+			"(either/or, based on the runtime environment)",
+		StabilityLevel: metrics.ALPHA,
+	},
+)
+
+var x509InsecureSHA1Counter = metrics.NewCounter(
+	&metrics.CounterOpts{
+		Subsystem: "webhooks",
+		Namespace: "apiserver",
+		Name:      "x509_insecure_sha1_total",
+		Help: "Counts the number of requests to servers with insecure SHA1 signatures " +
+			"in their serving certificate OR the number of connection failures " +
+			"due to the insecure SHA1 signatures (either/or, based on the runtime environment)",
+		StabilityLevel: metrics.ALPHA,
+	},
+)
+
+func init() {
+	legacyregistry.MustRegister(x509MissingSANCounter)
+	legacyregistry.MustRegister(x509InsecureSHA1Counter)
+}
diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go b/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go
new file mode 100644
index 00000000..fcd953da
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/util/webhook/serviceresolver.go
@@ -0,0 +1,48 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webhook
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+// ServiceResolver knows how to convert a service reference into an actual location.
+type ServiceResolver interface {
+	ResolveEndpoint(namespace, name string, port int32) (*url.URL, error)
+}
+
+type defaultServiceResolver struct{}
+
+// NewDefaultServiceResolver creates a new default service resolver.
+func NewDefaultServiceResolver() ServiceResolver {
+	return &defaultServiceResolver{}
+}
+
+// ResolveEndpoint constructs a service URL from a given namespace and name.
+// Note that the name, namespace, and port are required, and by default all
+// created addresses use the HTTPS scheme.
+// for example: +// +// name=ross namespace=andromeda resolves to https://ross.andromeda.svc:443 +func (sr defaultServiceResolver) ResolveEndpoint(namespace, name string, port int32) (*url.URL, error) { + if len(name) == 0 || len(namespace) == 0 || port == 0 { + return nil, errors.New("cannot resolve an empty service name or namespace or port") + } + return &url.URL{Scheme: "https", Host: fmt.Sprintf("%s.%s.svc:%d", name, namespace, port)}, nil +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go new file mode 100644 index 00000000..9cf258b7 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/validation.go @@ -0,0 +1,115 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/transport" +) + +func ValidateCABundle(fldPath *field.Path, caBundle []byte) field.ErrorList { + var allErrors field.ErrorList + _, err := transport.TLSConfigFor(&transport.Config{TLS: transport.TLSConfig{CAData: caBundle}}) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, caBundle, err.Error())) + } + return allErrors +} + +// ValidateWebhookURL validates webhook's URL. 
+func ValidateWebhookURL(fldPath *field.Path, URL string, forceHttps bool) field.ErrorList { + var allErrors field.ErrorList + const form = "; desired format: https://host[/path]" + if u, err := url.Parse(URL); err != nil { + allErrors = append(allErrors, field.Required(fldPath, "url must be a valid URL: "+err.Error()+form)) + } else { + if forceHttps && u.Scheme != "https" { + allErrors = append(allErrors, field.Invalid(fldPath, u.Scheme, "'https' is the only allowed URL scheme"+form)) + } + if len(u.Host) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Host, "host must be specified"+form)) + } + if u.User != nil { + allErrors = append(allErrors, field.Invalid(fldPath, u.User.String(), "user information is not permitted in the URL")) + } + if len(u.Fragment) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.Fragment, "fragments are not permitted in the URL")) + } + if len(u.RawQuery) != 0 { + allErrors = append(allErrors, field.Invalid(fldPath, u.RawQuery, "query parameters are not permitted in the URL")) + } + } + return allErrors +} + +func ValidateWebhookService(fldPath *field.Path, namespace, name string, path *string, port int32) field.ErrorList { + var allErrors field.ErrorList + + if len(name) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("name"), "service name is required")) + } + + if len(namespace) == 0 { + allErrors = append(allErrors, field.Required(fldPath.Child("namespace"), "service namespace is required")) + } + + if errs := validation.IsValidPortNum(int(port)); errs != nil { + allErrors = append(allErrors, field.Invalid(fldPath.Child("port"), port, "port is not valid: "+strings.Join(errs, ", "))) + } + + if path == nil { + return allErrors + } + + // TODO: replace below with url.Parse + verifying that host is empty? + + urlPath := *path + if urlPath == "/" || len(urlPath) == 0 { + return allErrors + } + if urlPath == "//" { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "segment[0] may not be empty")) + return allErrors + } + + if !strings.HasPrefix(urlPath, "/") { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, "must start with a '/'")) + } + + urlPathToCheck := urlPath[1:] + if strings.HasSuffix(urlPathToCheck, "/") { + urlPathToCheck = urlPathToCheck[:len(urlPathToCheck)-1] + } + steps := strings.Split(urlPathToCheck, "/") + for i, step := range steps { + if len(step) == 0 { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d] may not be empty", i))) + continue + } + failures := validation.IsDNS1123Subdomain(step) + for _, failure := range failures { + allErrors = append(allErrors, field.Invalid(fldPath.Child("path"), urlPath, fmt.Sprintf("segment[%d]: %v", i, failure))) + } + } + + return allErrors +} diff --git a/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go new file mode 100644 index 00000000..b03640ae --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go @@ -0,0 +1,170 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
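The URL rules above translate into behavior like the following sketch (the field path and hostnames are illustrative):

	fld := field.NewPath("webhooks").Index(0).Child("clientConfig", "url")

	errs := ValidateWebhookURL(fld, "https://hook.example.com/validate", true) // len(errs) == 0
	errs = ValidateWebhookURL(fld, "http://hook.example.com", true)            // rejected: https required
	errs = ValidateWebhookURL(fld, "https://user@hook.example.com", true)      // rejected: user info
	errs = ValidateWebhookURL(fld, "https://hook.example.com/v?watch=1", true) // rejected: query params
	_ = errs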
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package webhook implements a generic HTTP webhook plugin.
+package webhook
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/util/x509metrics"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+// defaultRequestTimeout is set for all webhook requests. This is the absolute
+// timeout of the HTTP request, including reading the response body.
+const defaultRequestTimeout = 30 * time.Second
+
+// DefaultRetryBackoffWithInitialDelay returns the default backoff parameters for webhook retry from a given initial delay.
+// Handy for a client that provides a custom initial delay only.
+func DefaultRetryBackoffWithInitialDelay(initialBackoffDelay time.Duration) wait.Backoff {
+	return wait.Backoff{
+		Duration: initialBackoffDelay,
+		Factor:   1.5,
+		Jitter:   0.2,
+		Steps:    5,
+	}
+}
+
+// GenericWebhook defines a generic client for webhooks with commonly used capabilities,
+// such as retrying requests.
+type GenericWebhook struct {
+	RestClient   *rest.RESTClient
+	RetryBackoff wait.Backoff
+	ShouldRetry  func(error) bool
+}
+
+// DefaultShouldRetry is a default implementation for the GenericWebhook ShouldRetry function property.
+// If the error reason is one of: networking (connection reset) or http (InternalServerError (500), GatewayTimeout (504), TooManyRequests (429)),
+// or apierrors.SuggestsClientDelay() returns true, then the function advises a retry.
+// Otherwise it returns false for an immediate fail.
+func DefaultShouldRetry(err error) bool {
+	// these errors indicate a transient error that should be retried.
+	if utilnet.IsConnectionReset(err) || utilnet.IsHTTP2ConnectionLost(err) || apierrors.IsInternalError(err) || apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) {
+		return true
+	}
+	// if the error carries a Retry-After header, we respect it as an explicit confirmation we should retry.
+	if _, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry {
+		return true
+	}
+	return false
+}
+
+// NewGenericWebhook creates a new GenericWebhook from the provided rest.Config.
+func NewGenericWebhook(scheme *runtime.Scheme, codecFactory serializer.CodecFactory, config *rest.Config, groupVersions []schema.GroupVersion, retryBackoff wait.Backoff) (*GenericWebhook, error) {
+	for _, groupVersion := range groupVersions {
+		if !scheme.IsVersionRegistered(groupVersion) {
+			return nil, fmt.Errorf("webhook plugin requires enabling extension resource: %s", groupVersion)
+		}
+	}
+
+	clientConfig := rest.CopyConfig(config)
+
+	codec := codecFactory.LegacyCodec(groupVersions...)
+	clientConfig.ContentConfig.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec})
+
+	clientConfig.Wrap(x509metrics.NewDeprecatedCertificateRoundTripperWrapperConstructor(
+		x509MissingSANCounter,
+		x509InsecureSHA1Counter,
+	))
+
+	restClient, err := rest.UnversionedRESTClientFor(clientConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return &GenericWebhook{restClient, retryBackoff, DefaultShouldRetry}, nil
+}
+
+// WithExponentialBackoff will retry webhookFn() as specified by the given backoff parameters with exponentially
+// increasing backoff when it returns an error for which this GenericWebhook's ShouldRetry function returns true,
+// confirming it to be retriable. If no ShouldRetry has been defined for the webhook,
+// then the default one is used (DefaultShouldRetry).
+func (g *GenericWebhook) WithExponentialBackoff(ctx context.Context, webhookFn func() rest.Result) rest.Result {
+	var result rest.Result
+	shouldRetry := g.ShouldRetry
+	if shouldRetry == nil {
+		shouldRetry = DefaultShouldRetry
+	}
+	WithExponentialBackoff(ctx, g.RetryBackoff, func() error {
+		result = webhookFn()
+		return result.Error()
+	}, shouldRetry)
+	return result
+}
+
+// WithExponentialBackoff will retry webhookFn up to 5 times with exponentially increasing backoff when
+// it returns an error for which shouldRetry returns true, confirming it to be retriable.
+func WithExponentialBackoff(ctx context.Context, retryBackoff wait.Backoff, webhookFn func() error, shouldRetry func(error) bool) error {
+	// having a webhook error allows us to track the last actual webhook error for requests that
+	// are later cancelled or time out.
+	var webhookErr error
+	err := wait.ExponentialBackoffWithContext(ctx, retryBackoff, func(_ context.Context) (bool, error) {
+		webhookErr = webhookFn()
+		if shouldRetry(webhookErr) {
+			return false, nil
+		}
+		if webhookErr != nil {
+			return false, webhookErr
+		}
+		return true, nil
+	})
+
+	switch {
+	// we check for webhookErr first, if webhookErr is set it's the most important error to return.
+	case webhookErr != nil:
+		return webhookErr
+	case err != nil:
+		return fmt.Errorf("webhook call failed: %s", err.Error())
+	default:
+		return nil
+	}
+}
+
+func LoadKubeconfig(kubeConfigFile string, customDial utilnet.DialFunc) (*rest.Config, error) {
+	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
+	loadingRules.ExplicitPath = kubeConfigFile
+	loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
+
+	clientConfig, err := loader.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	clientConfig.Dial = customDial
+
+	// Kubeconfigs can't set a timeout; this can only be set through a command line flag.
+	//
+	// https://github.com/kubernetes/client-go/blob/master/tools/clientcmd/overrides.go
+	//
+	// Set this to something reasonable so requests to webhooks don't hang forever.
+	clientConfig.Timeout = defaultRequestTimeout
+
+	// Avoid client-side rate limiting talking to the webhook backend.
+	// Rate limiting should happen when deciding how many requests to serve.
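The two WithExponentialBackoff variants above drive every webhook call in this vendor drop. A self-contained sketch of the package-level helper with an illustrative flaky function (the backoff values mirror DefaultRetryBackoffWithInitialDelay):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/util/webhook"
)

func main() {
	backoff := wait.Backoff{Duration: 500 * time.Millisecond, Factor: 1.5, Jitter: 0.2, Steps: 5}

	attempts := 0
	err := webhook.WithExponentialBackoff(context.Background(), backoff, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient") // retried because shouldRetry below approves it
		}
		return nil
	}, func(error) bool { return true }) // treat every error as retriable, up to Steps attempts

	fmt.Println(attempts, err) // 3 <nil>
}
```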
+	clientConfig.QPS = -1
+
+	return clientConfig, nil
+}
diff --git a/vendor/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go b/vendor/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go
new file mode 100644
index 00000000..464510ea
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/util/x509metrics/server_cert_deprecations.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package x509metrics
+
+import (
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net/http"
+	"reflect"
+	"strings"
+
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apiserver/pkg/audit"
+	"k8s.io/component-base/metrics"
+	"k8s.io/klog/v2"
+)
+
+var _ utilnet.RoundTripperWrapper = &x509DeprecatedCertificateMetricsRTWrapper{}
+
+type x509DeprecatedCertificateMetricsRTWrapper struct {
+	rt http.RoundTripper
+
+	checkers []deprecatedCertificateAttributeChecker
+}
+
+type deprecatedCertificateAttributeChecker interface {
+	// CheckRoundTripError returns true if the err is an error specific
+	// to this deprecated certificate attribute
+	CheckRoundTripError(err error) bool
+	// CheckPeerCertificates returns true if the deprecated attribute/value pair
+	// was found in a given certificate in the http.Response.TLS.PeerCertificates bundle
+	CheckPeerCertificates(certs []*x509.Certificate) bool
+	// IncreaseMetricsCounter increases the counter internal to this interface
+	// Use the req to derive and log information useful for troubleshooting the certificate issue
+	IncreaseMetricsCounter(req *http.Request)
+}
+
+// counterRaiser is a helper structure to include in certificate deprecation checkers.
+// It implements the IncreaseMetricsCounter() method so that, when included in the checker,
+// it does not have to be reimplemented.
+type counterRaiser struct {
+	counter *metrics.Counter
+	// programmatic id used in log and audit annotations prefixes
+	id string
+	// human readable explanation
+	reason string
+}
+
+func (c *counterRaiser) IncreaseMetricsCounter(req *http.Request) {
+	if req != nil && req.URL != nil {
+		if hostname := req.URL.Hostname(); len(hostname) > 0 {
+			prefix := fmt.Sprintf("%s.invalid-cert.kubernetes.io", c.id)
+			klog.Infof("%s: invalid certificate detected connecting to %q: %s", prefix, hostname, c.reason)
+			audit.AddAuditAnnotation(req.Context(), prefix+"/"+hostname, c.reason)
+		}
+	}
+	c.counter.Inc()
+}
+
+// NewDeprecatedCertificateRoundTripperWrapperConstructor returns a RoundTripper wrapper that's usable within ClientConfig.Wrap.
+//
+// It increases the `missingSAN` counter whenever:
+// 1. we get an x509.HostnameError with string `x509: certificate relies on legacy Common Name field`
+// which indicates an error caused by the deprecation of the Common Name field when verifying the remote
+// hostname
+// 2. the server certificate in response contains no SAN. This indicates that this binary was run
+// with GODEBUG=x509ignoreCN=0 in env
+//
+// It increases the `sha1` counter whenever:
+// 1. we get an x509.InsecureAlgorithmError with string `SHA1`
+// which indicates an error caused by an insecure SHA1 signature
+// 2. the server certificate in response contains a SHA1WithRSA or ECDSAWithSHA1 signature.
+// This indicates that this binary was run with GODEBUG=x509sha1=1 in env
+func NewDeprecatedCertificateRoundTripperWrapperConstructor(missingSAN, sha1 *metrics.Counter) func(rt http.RoundTripper) http.RoundTripper {
+	return func(rt http.RoundTripper) http.RoundTripper {
+		return &x509DeprecatedCertificateMetricsRTWrapper{
+			rt: rt,
+			checkers: []deprecatedCertificateAttributeChecker{
+				NewSANDeprecatedChecker(missingSAN),
+				NewSHA1SignatureDeprecatedChecker(sha1),
+			},
+		}
+	}
+}
+
+func (w *x509DeprecatedCertificateMetricsRTWrapper) RoundTrip(req *http.Request) (*http.Response, error) {
+	resp, err := w.rt.RoundTrip(req)
+
+	if err != nil {
+		for _, checker := range w.checkers {
+			if checker.CheckRoundTripError(err) {
+				checker.IncreaseMetricsCounter(req)
+			}
+		}
+	} else if resp != nil {
+		if resp.TLS != nil && len(resp.TLS.PeerCertificates) > 0 {
+			for _, checker := range w.checkers {
+				if checker.CheckPeerCertificates(resp.TLS.PeerCertificates) {
+					checker.IncreaseMetricsCounter(req)
+				}
+			}
+		}
+	}
+
+	return resp, err
+}
+
+func (w *x509DeprecatedCertificateMetricsRTWrapper) WrappedRoundTripper() http.RoundTripper {
+	return w.rt
+}
+
+var _ deprecatedCertificateAttributeChecker = &missingSANChecker{}
+
+type missingSANChecker struct {
+	counterRaiser
+}
+
+func NewSANDeprecatedChecker(counter *metrics.Counter) *missingSANChecker {
+	return &missingSANChecker{
+		counterRaiser: counterRaiser{
+			counter: counter,
+			id:      "missing-san",
+			reason:  "relies on a legacy Common Name field instead of the SAN extension for subject validation",
+		},
+	}
+}
+
+// CheckRoundTripError returns true when we're running w/o GODEBUG=x509ignoreCN=0
+// and the client reports a HostnameError about the legacy CN fields
+func (c *missingSANChecker) CheckRoundTripError(err error) bool {
+	if err != nil && errors.As(err, &x509.HostnameError{}) && strings.Contains(err.Error(), "x509: certificate relies on legacy Common Name field") {
+		// increase the count of registered failures due to Go 1.15 x509 cert Common Name deprecation
+		return true
+	}
+
+	return false
+}
+
+// CheckPeerCertificates returns true when the server response contains
+// a leaf certificate w/o the SAN extension
+func (c *missingSANChecker) CheckPeerCertificates(peerCertificates []*x509.Certificate) bool {
+	if len(peerCertificates) > 0 {
+		if serverCert := peerCertificates[0]; !hasSAN(serverCert) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func hasSAN(c *x509.Certificate) bool {
+	sanOID := []int{2, 5, 29, 17}
+
+	for _, e := range c.Extensions {
+		if e.Id.Equal(sanOID) {
+			return true
+		}
+	}
+	return false
+}
+
+type sha1SignatureChecker struct {
+	*counterRaiser
+}
+
+func NewSHA1SignatureDeprecatedChecker(counter *metrics.Counter) *sha1SignatureChecker {
+	return &sha1SignatureChecker{
+		counterRaiser: &counterRaiser{
+			counter: counter,
+			id:      "insecure-sha1",
+			reason:  "uses an insecure SHA-1 signature",
+		},
+	}
+}
+
+// CheckRoundTripError returns true when we're running w/o GODEBUG=x509sha1=1
+// and the client reports an InsecureAlgorithmError about a SHA1 signature
+func (c *sha1SignatureChecker) CheckRoundTripError(err error) bool {
+	var unknownAuthorityError x509.UnknownAuthorityError
+	if err == nil {
+		return false
+	}
+	if !errors.As(err, &unknownAuthorityError) {
+		return false
+	}
+
+	errMsg := 
err.Error() + if strIdx := strings.Index(errMsg, "x509: cannot verify signature: insecure algorithm"); strIdx != -1 && strings.Contains(errMsg[strIdx:], "SHA1") { + // increase the count of registered failures due to Go 1.18 x509 sha1 signature deprecation + return true + } + + return false +} + +// CheckPeerCertificates returns true when the server response contains +// a non-root non-self-signed certificate with a deprecated SHA1 signature +func (c *sha1SignatureChecker) CheckPeerCertificates(peerCertificates []*x509.Certificate) bool { + // check all received non-self-signed certificates for deprecated signing algorithms + for _, cert := range peerCertificates { + if cert.SignatureAlgorithm == x509.SHA1WithRSA || cert.SignatureAlgorithm == x509.ECDSAWithSHA1 { + // the SHA-1 deprecation does not involve self-signed root certificates + if !reflect.DeepEqual(cert.Issuer, cert.Subject) { + return true + } + } + } + + return false +} diff --git a/vendor/k8s.io/apiserver/pkg/warning/context.go b/vendor/k8s.io/apiserver/pkg/warning/context.go new file mode 100644 index 00000000..20092254 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/warning/context.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package warning + +import ( + "context" +) + +// The key type is unexported to prevent collisions +type key int + +const ( + // warningRecorderKey is the context key for the warning recorder. + warningRecorderKey key = iota +) + +// Recorder provides a method for recording warnings +type Recorder interface { + // AddWarning adds the specified warning to the response. + // agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters. + // text must be valid UTF-8, and must not contain control characters. + AddWarning(agent, text string) +} + +// WithWarningRecorder returns a new context that wraps the provided context and contains the provided Recorder implementation. +// The returned context can be passed to AddWarning(). +func WithWarningRecorder(ctx context.Context, recorder Recorder) context.Context { + return context.WithValue(ctx, warningRecorderKey, recorder) +} + +func warningRecorderFrom(ctx context.Context) (Recorder, bool) { + recorder, ok := ctx.Value(warningRecorderKey).(Recorder) + return recorder, ok +} + +// AddWarning records a warning for the specified agent and text to the Recorder added to the provided context using WithWarningRecorder(). +// If no Recorder exists in the provided context, this is a no-op. +// agent must be valid UTF-8, and must not contain spaces, quotes, backslashes, or control characters. +// text must be valid UTF-8, and must not contain control characters. 
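A minimal sketch of the Recorder plumbing above, pairing WithWarningRecorder with the AddWarning helper whose body follows just below (the in-memory recorder is hypothetical, for illustration only):

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apiserver/pkg/warning"
)

// sliceRecorder is a hypothetical Recorder that collects warnings in memory.
type sliceRecorder struct{ warnings []string }

func (r *sliceRecorder) AddWarning(agent, text string) {
	r.warnings = append(r.warnings, agent+": "+text)
}

func main() {
	rec := &sliceRecorder{}
	ctx := warning.WithWarningRecorder(context.Background(), rec)

	// Delivered to the Recorder stored in ctx; a no-op if none is present.
	warning.AddWarning(ctx, "example-agent", "field foo is deprecated")
	fmt.Println(rec.warnings)
}
```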
+func AddWarning(ctx context.Context, agent string, text string) { + recorder, ok := warningRecorderFrom(ctx) + if !ok { + return + } + recorder.AddWarning(agent, text) +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/metrics.go b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/metrics.go new file mode 100644 index 00000000..32e469e8 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/metrics.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "context" +) + +// AuthenticatorMetrics specifies a set of methods that are used to register various metrics +type AuthenticatorMetrics struct { + // RecordRequestTotal increments the total number of requests for webhooks + RecordRequestTotal func(ctx context.Context, code string) + + // RecordRequestLatency measures request latency in seconds for webhooks. Broken down by status code. + RecordRequestLatency func(ctx context.Context, code string, latency float64) +} + +type noopMetrics struct{} + +func (noopMetrics) RequestTotal(context.Context, string) {} +func (noopMetrics) RequestLatency(context.Context, string, float64) {} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go new file mode 100644 index 00000000..7d19b4b7 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go @@ -0,0 +1,327 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook implements the authenticator.Token interface using HTTP webhooks. +package webhook + +import ( + "context" + "errors" + "fmt" + "strconv" + "time" + + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/util/webhook" + "k8s.io/client-go/kubernetes/scheme" + authenticationv1client "k8s.io/client-go/kubernetes/typed/authentication/v1" + "k8s.io/client-go/rest" + "k8s.io/klog/v2" +) + +// DefaultRetryBackoff returns the default backoff parameters for webhook retry. 
+func DefaultRetryBackoff() *wait.Backoff {
+	backoff := webhook.DefaultRetryBackoffWithInitialDelay(500 * time.Millisecond)
+	return &backoff
+}
+
+// Ensure WebhookTokenAuthenticator implements the authenticator.Token interface.
+var _ authenticator.Token = (*WebhookTokenAuthenticator)(nil)
+
+type tokenReviewer interface {
+	Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, int, error)
+}
+
+type WebhookTokenAuthenticator struct {
+	tokenReview    tokenReviewer
+	retryBackoff   wait.Backoff
+	implicitAuds   authenticator.Audiences
+	requestTimeout time.Duration
+	metrics        AuthenticatorMetrics
+}
+
+// NewFromInterface creates a webhook authenticator using the given tokenReview
+// client. It is recommended to wrap this authenticator with the token cache
+// authenticator implemented in
+// k8s.io/apiserver/pkg/authentication/token/cache.
+func NewFromInterface(tokenReview authenticationv1client.AuthenticationV1Interface, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff, requestTimeout time.Duration, metrics AuthenticatorMetrics) (*WebhookTokenAuthenticator, error) {
+	tokenReviewClient := &tokenReviewV1Client{tokenReview.RESTClient()}
+	return newWithBackoff(tokenReviewClient, retryBackoff, implicitAuds, requestTimeout, metrics)
+}
+
+// New creates a new WebhookTokenAuthenticator from the provided rest
+// config. It is recommended to wrap this authenticator with the token cache
+// authenticator implemented in
+// k8s.io/apiserver/pkg/authentication/token/cache.
+func New(config *rest.Config, version string, implicitAuds authenticator.Audiences, retryBackoff wait.Backoff) (*WebhookTokenAuthenticator, error) {
+	tokenReview, err := tokenReviewInterfaceFromConfig(config, version, retryBackoff)
+	if err != nil {
+		return nil, err
+	}
+	return newWithBackoff(tokenReview, retryBackoff, implicitAuds, time.Duration(0), AuthenticatorMetrics{
+		RecordRequestTotal:   noopMetrics{}.RequestTotal,
+		RecordRequestLatency: noopMetrics{}.RequestLatency,
+	})
+}
+
+// newWithBackoff allows tests to skip the sleep.
+func newWithBackoff(tokenReview tokenReviewer, retryBackoff wait.Backoff, implicitAuds authenticator.Audiences, requestTimeout time.Duration, metrics AuthenticatorMetrics) (*WebhookTokenAuthenticator, error) {
+	return &WebhookTokenAuthenticator{
+		tokenReview,
+		retryBackoff,
+		implicitAuds,
+		requestTimeout,
+		metrics,
+	}, nil
+}
+
+// AuthenticateToken implements the authenticator.Token interface.
+func (w *WebhookTokenAuthenticator) AuthenticateToken(ctx context.Context, token string) (*authenticator.Response, bool, error) {
+	// We take implicit audiences of the API server at WebhookTokenAuthenticator
+	// construction time. The outline of how we validate audience here is:
+	//
+	// * if the ctx is not audience limited, don't do any audience validation.
+	// * if ctx is audience-limited, add the audiences to the tokenreview spec
+	// * if the tokenreview returns with audiences in the status that intersect
+	//   with the audiences in the ctx, copy into the response and return success
+	// * if the tokenreview returns without an audience in the status, ensure
+	//   the ctx audiences intersect with the implicit audiences, and set the
+	//   intersection in the response.
+	// * otherwise return unauthenticated.
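The outline above turns on audience intersection; before the function body continues below, here is that step in isolation (the sample audience strings are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/authentication/authenticator"
)

func main() {
	implicit := authenticator.Audiences{"https://kubernetes.default.svc"}
	want := authenticator.Audiences{"https://kubernetes.default.svc", "https://other.example.com"}

	// Authentication succeeds only when this intersection is non-empty.
	fmt.Println(want.Intersect(implicit)) // [https://kubernetes.default.svc]
}
```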
+	wantAuds, checkAuds := authenticator.AudiencesFrom(ctx)
+	r := &authenticationv1.TokenReview{
+		Spec: authenticationv1.TokenReviewSpec{
+			Token:     token,
+			Audiences: wantAuds,
+		},
+	}
+	var (
+		result *authenticationv1.TokenReview
+		auds   authenticator.Audiences
+		cancel context.CancelFunc
+	)
+
+	// set a hard timeout if it was defined
+	// if the child has a shorter deadline then it will expire first,
+	// otherwise if the parent has a shorter deadline then the parent will expire and it will be propagated to the child
+	if w.requestTimeout > 0 {
+		ctx, cancel = context.WithTimeout(ctx, w.requestTimeout)
+		defer cancel()
+	}
+
+	// WithExponentialBackoff will return tokenreview create error (tokenReviewErr) if any.
+	if err := webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error {
+		var tokenReviewErr error
+		var statusCode int
+
+		start := time.Now()
+		result, statusCode, tokenReviewErr = w.tokenReview.Create(ctx, r, metav1.CreateOptions{})
+		latency := time.Since(start)
+
+		if statusCode != 0 {
+			w.metrics.RecordRequestTotal(ctx, strconv.Itoa(statusCode))
+			w.metrics.RecordRequestLatency(ctx, strconv.Itoa(statusCode), latency.Seconds())
+			return tokenReviewErr
+		}
+
+		if tokenReviewErr != nil {
+			w.metrics.RecordRequestTotal(ctx, "<error>")
+			w.metrics.RecordRequestLatency(ctx, "<error>", latency.Seconds())
+		}
+		return tokenReviewErr
+	}, webhook.DefaultShouldRetry); err != nil {
+		// An error here indicates bad configuration or an outage. Log for debugging.
+		klog.Errorf("Failed to make webhook authenticator request: %v", err)
+		return nil, false, err
+	}
+
+	if checkAuds {
+		gotAuds := w.implicitAuds
+		if len(result.Status.Audiences) > 0 {
+			gotAuds = result.Status.Audiences
+		}
+		auds = wantAuds.Intersect(gotAuds)
+		if len(auds) == 0 {
+			return nil, false, nil
+		}
+	}
+
+	r.Status = result.Status
+	if !r.Status.Authenticated {
+		var err error
+		if len(r.Status.Error) != 0 {
+			err = errors.New(r.Status.Error)
+		}
+		return nil, false, err
+	}
+
+	var extra map[string][]string
+	if r.Status.User.Extra != nil {
+		extra = map[string][]string{}
+		for k, v := range r.Status.User.Extra {
+			extra[k] = v
+		}
+	}
+
+	return &authenticator.Response{
+		User: &user.DefaultInfo{
+			Name:   r.Status.User.Username,
+			UID:    r.Status.User.UID,
+			Groups: r.Status.User.Groups,
+			Extra:  extra,
+		},
+		Audiences: auds,
+	}, true, nil
+}
+
+// tokenReviewInterfaceFromConfig builds a client from the specified kubeconfig file,
+// and returns a TokenReviewInterface that uses that client. Note that the client submits TokenReview
+// requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted.
+func tokenReviewInterfaceFromConfig(config *rest.Config, version string, retryBackoff wait.Backoff) (tokenReviewer, error) { + localScheme := runtime.NewScheme() + if err := scheme.AddToScheme(localScheme); err != nil { + return nil, err + } + + switch version { + case authenticationv1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authenticationv1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, config, groupVersions, retryBackoff) + if err != nil { + return nil, err + } + return &tokenReviewV1ClientGW{gw.RestClient}, nil + + case authenticationv1beta1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authenticationv1beta1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, config, groupVersions, retryBackoff) + if err != nil { + return nil, err + } + return &tokenReviewV1beta1ClientGW{gw.RestClient}, nil + + default: + return nil, fmt.Errorf( + "unsupported authentication webhook version %q, supported versions are %q, %q", + version, + authenticationv1.SchemeGroupVersion.Version, + authenticationv1beta1.SchemeGroupVersion.Version, + ) + } + +} + +type tokenReviewV1Client struct { + client rest.Interface +} + +// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, HTTP status code and an error, if there is any. +func (c *tokenReviewV1Client) Create(ctx context.Context, tokenReview *authenticationv1.TokenReview, opts metav1.CreateOptions) (result *authenticationv1.TokenReview, statusCode int, err error) { + result = &authenticationv1.TokenReview{} + + restResult := c.client.Post(). + Resource("tokenreviews"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(tokenReview). + Do(ctx) + + restResult.StatusCode(&statusCode) + err = restResult.Into(result) + return +} + +// tokenReviewV1ClientGW used by the generic webhook, doesn't specify GVR. +type tokenReviewV1ClientGW struct { + client rest.Interface +} + +// Create takes the representation of a tokenReview and creates it. Returns the server's representation of the tokenReview, HTTP status code and an error, if there is any. +func (c *tokenReviewV1ClientGW) Create(ctx context.Context, tokenReview *authenticationv1.TokenReview, opts metav1.CreateOptions) (result *authenticationv1.TokenReview, statusCode int, err error) { + result = &authenticationv1.TokenReview{} + + restResult := c.client.Post(). + Body(tokenReview). + Do(ctx) + + restResult.StatusCode(&statusCode) + err = restResult.Into(result) + return +} + +// tokenReviewV1beta1ClientGW used by the generic webhook, doesn't specify GVR. 
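Putting the pieces together, a hedged sketch of wiring the v1 token authenticator end to end, using LoadKubeconfig from the generic webhook package above (the kubeconfig path, audience, and token are hypothetical):

```go
package main

import (
	"context"
	"fmt"

	"k8s.io/apiserver/pkg/authentication/authenticator"
	"k8s.io/apiserver/pkg/util/webhook"
	tokenwebhook "k8s.io/apiserver/plugin/pkg/authenticator/token/webhook"
)

func main() {
	// Hypothetical kubeconfig pointing at the remote TokenReview endpoint.
	config, err := webhook.LoadKubeconfig("/etc/authn/webhook-kubeconfig", nil)
	if err != nil {
		panic(err)
	}

	// "v1" selects the authentication.k8s.io/v1 TokenReview client above.
	auth, err := tokenwebhook.New(config, "v1",
		authenticator.Audiences{"https://kubernetes.default.svc"},
		*tokenwebhook.DefaultRetryBackoff())
	if err != nil {
		panic(err)
	}

	resp, ok, err := auth.AuthenticateToken(context.Background(), "sample-bearer-token")
	fmt.Println(ok, err)
	if ok {
		fmt.Println(resp.User.GetName())
	}
}
```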
+type tokenReviewV1beta1ClientGW struct { + client rest.Interface +} + +func (t *tokenReviewV1beta1ClientGW) Create(ctx context.Context, review *authenticationv1.TokenReview, _ metav1.CreateOptions) (*authenticationv1.TokenReview, int, error) { + var statusCode int + v1beta1Review := &authenticationv1beta1.TokenReview{Spec: v1SpecToV1beta1Spec(&review.Spec)} + v1beta1Result := &authenticationv1beta1.TokenReview{} + + restResult := t.client.Post().Body(v1beta1Review).Do(ctx) + restResult.StatusCode(&statusCode) + err := restResult.Into(v1beta1Result) + if err != nil { + return nil, statusCode, err + } + review.Status = v1beta1StatusToV1Status(&v1beta1Result.Status) + return review, statusCode, nil +} + +func v1SpecToV1beta1Spec(in *authenticationv1.TokenReviewSpec) authenticationv1beta1.TokenReviewSpec { + return authenticationv1beta1.TokenReviewSpec{ + Token: in.Token, + Audiences: in.Audiences, + } +} + +func v1beta1StatusToV1Status(in *authenticationv1beta1.TokenReviewStatus) authenticationv1.TokenReviewStatus { + return authenticationv1.TokenReviewStatus{ + Authenticated: in.Authenticated, + User: v1beta1UserToV1User(in.User), + Audiences: in.Audiences, + Error: in.Error, + } +} + +func v1beta1UserToV1User(u authenticationv1beta1.UserInfo) authenticationv1.UserInfo { + var extra map[string]authenticationv1.ExtraValue + if u.Extra != nil { + extra = make(map[string]authenticationv1.ExtraValue, len(u.Extra)) + for k, v := range u.Extra { + extra[k] = authenticationv1.ExtraValue(v) + } + } + return authenticationv1.UserInfo{ + Username: u.Username, + UID: u.UID, + Groups: u.Groups, + Extra: extra, + } +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh new file mode 100644 index 00000000..a66f8f38 --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh @@ -0,0 +1,102 @@ +#!/usr/bin/env bash + +# Copyright 2016 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -e + +# gencerts.sh generates the certificates for the webhook authz plugin tests. +# +# It is not expected to be run often (there is no go generate rule), and mainly +# exists for documentation purposes. 
+
+cat > server.conf << EOF
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = serverAuth
+subjectAltName = @alt_names
+[alt_names]
+IP.1 = 127.0.0.1
+EOF
+
+cat > client.conf << EOF
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = clientAuth
+EOF
+
+# Create a certificate authority
+openssl genrsa -out caKey.pem 2048
+openssl req -x509 -new -nodes -key caKey.pem -days 100000 -out caCert.pem -subj "/CN=webhook_authz_ca"
+
+# Create a second certificate authority
+openssl genrsa -out badCAKey.pem 2048
+openssl req -x509 -new -nodes -key badCAKey.pem -days 100000 -out badCACert.pem -subj "/CN=webhook_authz_ca"
+
+# Create a server certificate
+openssl genrsa -out serverKey.pem 2048
+openssl req -new -key serverKey.pem -out server.csr -subj "/CN=webhook_authz_server" -config server.conf
+openssl x509 -req -in server.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out serverCert.pem -days 100000 -extensions v3_req -extfile server.conf
+
+# Create a client certificate
+openssl genrsa -out clientKey.pem 2048
+openssl req -new -key clientKey.pem -out client.csr -subj "/CN=webhook_authz_client" -config client.conf
+openssl x509 -req -in client.csr -CA caCert.pem -CAkey caKey.pem -CAcreateserial -out clientCert.pem -days 100000 -extensions v3_req -extfile client.conf
+
+outfile=certs_test.go
+
+cat > $outfile << EOF
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was generated using openssl by the gencerts.sh script
+// and holds raw certificates for the webhook tests.
+
+package webhook
+EOF
+
+for file in caKey caCert badCAKey badCACert serverKey serverCert clientKey clientCert; do
+	data=$(cat ${file}.pem)
+	echo "" >> $outfile
+	echo "var $file = []byte(\`$data\`)" >> $outfile
+done
+
+# Clean up after we're done.
+rm ./*.pem
+rm ./*.csr
+rm ./*.srl
+rm ./*.conf
diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics/metrics.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics/metrics.go
new file mode 100644
index 00000000..23f82cc6
--- /dev/null
+++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics/metrics.go
@@ -0,0 +1,166 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"sync"
+
+	"k8s.io/apiserver/pkg/authorization/cel"
+	compbasemetrics "k8s.io/component-base/metrics"
+	"k8s.io/component-base/metrics/legacyregistry"
+)
+
+// AuthorizerMetrics specifies a set of methods that are used to register various metrics for the webhook authorizer
+type AuthorizerMetrics interface {
+	// Request total and latency metrics
+	RequestMetrics
+	// Webhook count, latency, and fail open metrics
+	WebhookMetrics
+	// Match condition metrics
+	cel.MatcherMetrics
+}
+
+type NoopAuthorizerMetrics struct {
+	NoopRequestMetrics
+	NoopWebhookMetrics
+	cel.NoopMatcherMetrics
+}
+
+type RequestMetrics interface {
+	// RecordRequestTotal increments the total number of requests for the webhook authorizer
+	RecordRequestTotal(ctx context.Context, code string)
+
+	// RecordRequestLatency measures request latency in seconds for webhooks. Broken down by status code.
+	RecordRequestLatency(ctx context.Context, code string, latency float64)
+}
+
+type NoopRequestMetrics struct{}
+
+func (NoopRequestMetrics) RecordRequestTotal(context.Context, string)            {}
+func (NoopRequestMetrics) RecordRequestLatency(context.Context, string, float64) {}
+
+type WebhookMetrics interface {
+	// RecordWebhookEvaluation increments with each round-trip of a webhook authorizer.
+	// result is one of:
+	// - canceled: the call invoking the webhook request was canceled
+	// - timeout: the webhook request timed out
+	// - error: the webhook response completed and was invalid
+	// - success: the webhook response completed and was well-formed
+	RecordWebhookEvaluation(ctx context.Context, name, result string)
+	// RecordWebhookDuration records latency for each round-trip of a webhook authorizer.
+	// result is one of:
+	// - canceled: the call invoking the webhook request was canceled
+	// - timeout: the webhook request timed out
+	// - error: the webhook response completed and was invalid
+	// - success: the webhook response completed and was well-formed
+	RecordWebhookDuration(ctx context.Context, name, result string, duration float64)
+	// RecordWebhookFailOpen increments when a webhook timeout or error results in a fail open
+	// of a request which has not been canceled.
+	// result is one of:
+	// - timeout: the webhook request timed out
+	// - error: the webhook response completed and was invalid
+	RecordWebhookFailOpen(ctx context.Context, name, result string)
+}
+
+type NoopWebhookMetrics struct{}
+
+func (NoopWebhookMetrics) RecordWebhookEvaluation(ctx context.Context, name, result string) {}
+func (NoopWebhookMetrics) RecordWebhookDuration(ctx context.Context, name, result string, duration float64) {
+}
+func (NoopWebhookMetrics) RecordWebhookFailOpen(ctx context.Context, name, result string) {}
+
+var registerWebhookMetrics sync.Once
+
+// RegisterWebhookMetrics registers authorizer webhook metrics.
+func RegisterWebhookMetrics() { + registerWebhookMetrics.Do(func() { + legacyregistry.MustRegister(webhookEvaluations) + legacyregistry.MustRegister(webhookDuration) + legacyregistry.MustRegister(webhookFailOpen) + }) +} + +func ResetMetricsForTest() { + webhookEvaluations.Reset() + webhookDuration.Reset() + webhookFailOpen.Reset() +} + +const ( + namespace = "apiserver" + subsystem = "authorization" +) + +var ( + webhookEvaluations = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "webhook_evaluations_total", + Help: "Round-trips to authorization webhooks.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"name", "result"}, + ) + + webhookDuration = compbasemetrics.NewHistogramVec( + &compbasemetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "webhook_duration_seconds", + Help: "Request latency in seconds.", + Buckets: compbasemetrics.DefBuckets, + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"name", "result"}, + ) + + webhookFailOpen = compbasemetrics.NewCounterVec( + &compbasemetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "webhook_evaluations_fail_open_total", + Help: "NoOpinion results due to webhook timeout or error.", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{"name", "result"}, + ) +) + +type webhookMetrics struct{} + +func NewWebhookMetrics() WebhookMetrics { + RegisterWebhookMetrics() + return webhookMetrics{} +} + +func ResetWebhookMetricsForTest() { + webhookEvaluations.Reset() + webhookDuration.Reset() + webhookFailOpen.Reset() +} + +func (webhookMetrics) RecordWebhookEvaluation(ctx context.Context, name, result string) { + webhookEvaluations.WithContext(ctx).WithLabelValues(name, result).Inc() +} +func (webhookMetrics) RecordWebhookDuration(ctx context.Context, name, result string, duration float64) { + webhookDuration.WithContext(ctx).WithLabelValues(name, result).Observe(duration) +} +func (webhookMetrics) RecordWebhookFailOpen(ctx context.Context, name, result string) { + webhookFailOpen.WithContext(ctx).WithLabelValues(name, result).Inc() +} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go new file mode 100644 index 00000000..ebc4949d --- /dev/null +++ b/vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go @@ -0,0 +1,605 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook implements the authorizer.Authorizer interface using HTTP webhooks. 
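For illustration, the exported constructor above can be used directly; a small sketch (the authorizer name and recorded values are illustrative):

```go
package main

import (
	"context"

	"k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics"
)

func main() {
	m := metrics.NewWebhookMetrics() // registers the vectors with legacyregistry exactly once

	ctx := context.Background()
	// One successful round-trip, 42ms, for an authorizer named "sample".
	m.RecordWebhookEvaluation(ctx, "sample", "success")
	m.RecordWebhookDuration(ctx, "sample", "success", 0.042)
}
```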
+package webhook
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strconv"
+	"time"
+
+	authorizationv1 "k8s.io/api/authorization/v1"
+	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/selection"
+	"k8s.io/apimachinery/pkg/util/cache"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/apis/apiserver"
+	apiservervalidation "k8s.io/apiserver/pkg/apis/apiserver/validation"
+	"k8s.io/apiserver/pkg/authentication/user"
+	"k8s.io/apiserver/pkg/authorization/authorizer"
+	authorizationcel "k8s.io/apiserver/pkg/authorization/cel"
+	genericfeatures "k8s.io/apiserver/pkg/features"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/apiserver/pkg/util/webhook"
+	"k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics"
+	"k8s.io/client-go/kubernetes/scheme"
+	authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1"
+	"k8s.io/client-go/rest"
+	"k8s.io/klog/v2"
+)
+
+const (
+	// The maximum length of requester-controlled attributes to allow caching.
+	maxControlledAttrCacheSize = 10000
+)
+
+// DefaultRetryBackoff returns the default backoff parameters for webhook retry.
+func DefaultRetryBackoff() *wait.Backoff {
+	backoff := webhook.DefaultRetryBackoffWithInitialDelay(500 * time.Millisecond)
+	return &backoff
+}
+
+// Ensure WebhookAuthorizer implements the authorizer.Authorizer interface.
+var _ authorizer.Authorizer = (*WebhookAuthorizer)(nil)
+
+type subjectAccessReviewer interface {
+	Create(context.Context, *authorizationv1.SubjectAccessReview, metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, int, error)
+}
+
+type WebhookAuthorizer struct {
+	subjectAccessReview subjectAccessReviewer
+	responseCache       *cache.LRUExpireCache
+	authorizedTTL       time.Duration
+	unauthorizedTTL     time.Duration
+	retryBackoff        wait.Backoff
+	decisionOnError     authorizer.Decision
+	metrics             metrics.AuthorizerMetrics
+	celMatcher          *authorizationcel.CELMatcher
+	name                string
+}
+
+// NewFromInterface creates a WebhookAuthorizer using the given subjectAccessReview client
+func NewFromInterface(subjectAccessReview authorizationv1client.AuthorizationV1Interface, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, metrics metrics.AuthorizerMetrics) (*WebhookAuthorizer, error) {
+	return newWithBackoff(&subjectAccessReviewV1Client{subjectAccessReview.RESTClient()}, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, nil, metrics, "")
+}
+
+// New creates a new WebhookAuthorizer from the provided kubeconfig file.
+// The config's cluster field is used to refer to the remote service; user refers to the returned authorizer.
+//
+// # clusters refers to the remote service.
+// clusters:
+//   - name: name-of-remote-authz-service
+//     cluster:
+//       certificate-authority: /path/to/ca.pem # CA for verifying the remote service.
+//       server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'.
+//
+// # users refers to the API server's webhook configuration.
+// users: +// - name: name-of-api-server +// user: +// client-certificate: /path/to/cert.pem # cert for the webhook plugin to use +// client-key: /path/to/key.pem # key matching the cert +// +// For additional HTTP configuration, refer to the kubeconfig documentation +// https://kubernetes.io/docs/user-guide/kubeconfig-file/. +func New(config *rest.Config, version string, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, name string, metrics metrics.AuthorizerMetrics) (*WebhookAuthorizer, error) { + subjectAccessReview, err := subjectAccessReviewInterfaceFromConfig(config, version, retryBackoff) + if err != nil { + return nil, err + } + return newWithBackoff(subjectAccessReview, authorizedTTL, unauthorizedTTL, retryBackoff, decisionOnError, matchConditions, metrics, name) +} + +// newWithBackoff allows tests to skip the sleep. +func newWithBackoff(subjectAccessReview subjectAccessReviewer, authorizedTTL, unauthorizedTTL time.Duration, retryBackoff wait.Backoff, decisionOnError authorizer.Decision, matchConditions []apiserver.WebhookMatchCondition, am metrics.AuthorizerMetrics, name string) (*WebhookAuthorizer, error) { + // compile all expressions once in validation and save the results to be used for eval later + cm, fieldErr := apiservervalidation.ValidateAndCompileMatchConditions(matchConditions) + if err := fieldErr.ToAggregate(); err != nil { + return nil, err + } + if cm != nil { + cm.AuthorizerType = "Webhook" + cm.AuthorizerName = name + cm.Metrics = am + } + return &WebhookAuthorizer{ + subjectAccessReview: subjectAccessReview, + responseCache: cache.NewLRUExpireCache(8192), + authorizedTTL: authorizedTTL, + unauthorizedTTL: unauthorizedTTL, + retryBackoff: retryBackoff, + decisionOnError: decisionOnError, + metrics: am, + celMatcher: cm, + name: name, + }, nil +} + +// Authorize makes a REST request to the remote service describing the attempted action as a JSON +// serialized api.authorization.v1beta1.SubjectAccessReview object. An example request body is +// provided below. +// +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "spec": { +// "resourceAttributes": { +// "namespace": "kittensandponies", +// "verb": "GET", +// "group": "group3", +// "resource": "pods" +// }, +// "user": "jane", +// "group": [ +// "group1", +// "group2" +// ] +// } +// } +// +// The remote service is expected to fill the SubjectAccessReviewStatus field to either allow or +// disallow access. A permissive response would return: +// +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "status": { +// "allowed": true +// } +// } +// +// To disallow access, the remote service would return: +// +// { +// "apiVersion": "authorization.k8s.io/v1beta1", +// "kind": "SubjectAccessReview", +// "status": { +// "allowed": false, +// "reason": "user does not have read access to the namespace" +// } +// } +// +// TODO(mikedanese): We should eventually support failing closed when we +// encounter an error. We are failing open now to preserve backwards compatible +// behavior. 
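A hedged sketch of constructing the authorizer with NewFromInterface and issuing one Authorize call (in-cluster config, the TTLs, and the attribute values are assumptions for illustration):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	webhookauthz "k8s.io/apiserver/plugin/pkg/authorizer/webhook"
	"k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	config, err := rest.InClusterConfig() // any rest.Config pointing at an API server works
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	authz, err := webhookauthz.NewFromInterface(
		client.AuthorizationV1(),
		5*time.Minute,  // authorizedTTL: cache allows for five minutes
		30*time.Second, // unauthorizedTTL: cache denials briefly
		*webhookauthz.DefaultRetryBackoff(),
		authorizer.DecisionNoOpinion, // fail open on webhook errors
		metrics.NoopAuthorizerMetrics{},
	)
	if err != nil {
		panic(err)
	}

	decision, reason, err := authz.Authorize(context.Background(), authorizer.AttributesRecord{
		User:            &user.DefaultInfo{Name: "jane"},
		Verb:            "get",
		Namespace:       "default",
		Resource:        "pods",
		ResourceRequest: true,
	})
	fmt.Println(decision, reason, err)
}
```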
+func (w *WebhookAuthorizer) Authorize(ctx context.Context, attr authorizer.Attributes) (decision authorizer.Decision, reason string, err error) { + r := &authorizationv1.SubjectAccessReview{} + if user := attr.GetUser(); user != nil { + r.Spec = authorizationv1.SubjectAccessReviewSpec{ + User: user.GetName(), + UID: user.GetUID(), + Groups: user.GetGroups(), + Extra: convertToSARExtra(user.GetExtra()), + } + } + + if attr.IsResourceRequest() { + r.Spec.ResourceAttributes = resourceAttributesFrom(attr) + } else { + r.Spec.NonResourceAttributes = &authorizationv1.NonResourceAttributes{ + Path: attr.GetPath(), + Verb: attr.GetVerb(), + } + } + // skipping match when feature is not enabled + if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StructuredAuthorizationConfiguration) { + // Process Match Conditions before calling the webhook + matches, err := w.match(ctx, r) + // If at least one matchCondition evaluates to an error (but none are FALSE): + // If failurePolicy=Deny, then the webhook rejects the request + // If failurePolicy=NoOpinion, then the error is ignored and the webhook is skipped + if err != nil { + return w.decisionOnError, "", err + } + // If at least one matchCondition successfully evaluates to FALSE, + // then the webhook is skipped. + if !matches { + return authorizer.DecisionNoOpinion, "", nil + } + } + // If all evaluated successfully and ALL matchConditions evaluate to TRUE, + // then the webhook is called. + key, err := json.Marshal(r.Spec) + if err != nil { + return w.decisionOnError, "", err + } + if entry, ok := w.responseCache.Get(string(key)); ok { + r.Status = entry.(authorizationv1.SubjectAccessReviewStatus) + } else { + var result *authorizationv1.SubjectAccessReview + var metricsResult string + // WithExponentialBackoff will return SAR create error (sarErr) if any. 
+	if err := webhook.WithExponentialBackoff(ctx, w.retryBackoff, func() error {
+		var sarErr error
+		var statusCode int
+
+		start := time.Now()
+		result, statusCode, sarErr = w.subjectAccessReview.Create(ctx, r, metav1.CreateOptions{})
+		latency := time.Since(start)
+
+		switch {
+		case sarErr == nil:
+			metricsResult = "success"
+		case ctx.Err() != nil:
+			metricsResult = "canceled"
+		case utilnet.IsTimeout(sarErr) || errors.Is(sarErr, context.DeadlineExceeded) || apierrors.IsTimeout(sarErr) || statusCode == http.StatusGatewayTimeout:
+			metricsResult = "timeout"
+		default:
+			metricsResult = "error"
+		}
+		w.metrics.RecordWebhookEvaluation(ctx, w.name, metricsResult)
+		w.metrics.RecordWebhookDuration(ctx, w.name, metricsResult, latency.Seconds())
+
+		if statusCode != 0 {
+			w.metrics.RecordRequestTotal(ctx, strconv.Itoa(statusCode))
+			w.metrics.RecordRequestLatency(ctx, strconv.Itoa(statusCode), latency.Seconds())
+			return sarErr
+		}
+
+		if sarErr != nil {
+			w.metrics.RecordRequestTotal(ctx, "<error>")
+			w.metrics.RecordRequestLatency(ctx, "<error>", latency.Seconds())
+		}
+
+		return sarErr
+	}, webhook.DefaultShouldRetry); err != nil {
+		klog.Errorf("Failed to make webhook authorizer request: %v", err)
+
+		// we're returning NoOpinion, and the parent context has not timed out or been canceled
+		if w.decisionOnError == authorizer.DecisionNoOpinion && ctx.Err() == nil {
+			w.metrics.RecordWebhookFailOpen(ctx, w.name, metricsResult)
+		}
+
+		return w.decisionOnError, "", err
+	}
+
+		r.Status = result.Status
+		if shouldCache(attr) {
+			if r.Status.Allowed {
+				w.responseCache.Add(string(key), r.Status, w.authorizedTTL)
+			} else {
+				w.responseCache.Add(string(key), r.Status, w.unauthorizedTTL)
+			}
+		}
+	}
+	switch {
+	case r.Status.Denied && r.Status.Allowed:
+		return authorizer.DecisionDeny, r.Status.Reason, fmt.Errorf("webhook subject access review returned both allow and deny response")
+	case r.Status.Denied:
+		return authorizer.DecisionDeny, r.Status.Reason, nil
+	case r.Status.Allowed:
+		return authorizer.DecisionAllow, r.Status.Reason, nil
+	default:
+		return authorizer.DecisionNoOpinion, r.Status.Reason, nil
+	}
+
+}
+
+func resourceAttributesFrom(attr authorizer.Attributes) *authorizationv1.ResourceAttributes {
+	ret := &authorizationv1.ResourceAttributes{
+		Namespace:   attr.GetNamespace(),
+		Verb:        attr.GetVerb(),
+		Group:       attr.GetAPIGroup(),
+		Version:     attr.GetAPIVersion(),
+		Resource:    attr.GetResource(),
+		Subresource: attr.GetSubresource(),
+		Name:        attr.GetName(),
+	}
+
+	if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.AuthorizeWithSelectors) {
+		// If we are able to get any requirements while parsing selectors, use them, even if there's an error.
+		// This is because selectors only narrow, so if a subset of selector requirements are available, the request can be allowed.
+ if selectorRequirements, _ := fieldSelectorToAuthorizationAPI(attr); len(selectorRequirements) > 0 { + ret.FieldSelector = &authorizationv1.FieldSelectorAttributes{ + Requirements: selectorRequirements, + } + } + + if selectorRequirements, _ := labelSelectorToAuthorizationAPI(attr); len(selectorRequirements) > 0 { + ret.LabelSelector = &authorizationv1.LabelSelectorAttributes{ + Requirements: selectorRequirements, + } + } + } + + return ret +} + +func fieldSelectorToAuthorizationAPI(attr authorizer.Attributes) ([]metav1.FieldSelectorRequirement, error) { + requirements, getFieldSelectorErr := attr.GetFieldSelector() + if len(requirements) == 0 { + return nil, getFieldSelectorErr + } + + retRequirements := []metav1.FieldSelectorRequirement{} + for _, requirement := range requirements { + retRequirement := metav1.FieldSelectorRequirement{} + switch { + case requirement.Operator == selection.Equals || requirement.Operator == selection.DoubleEquals || requirement.Operator == selection.In: + retRequirement.Operator = metav1.FieldSelectorOpIn + retRequirement.Key = requirement.Field + retRequirement.Values = []string{requirement.Value} + case requirement.Operator == selection.NotEquals || requirement.Operator == selection.NotIn: + retRequirement.Operator = metav1.FieldSelectorOpNotIn + retRequirement.Key = requirement.Field + retRequirement.Values = []string{requirement.Value} + default: + // ignore this particular requirement. since requirements are AND'd, it is safe to ignore unknown requirements + // for authorization since the resulting check will only be as broad or broader than the intended. + continue + } + retRequirements = append(retRequirements, retRequirement) + } + + if len(retRequirements) == 0 { + // this means that all requirements were dropped (likely due to unknown operators), so we are checking the broader + // unrestricted action. + return nil, getFieldSelectorErr + } + return retRequirements, getFieldSelectorErr +} + +func labelSelectorToAuthorizationAPI(attr authorizer.Attributes) ([]metav1.LabelSelectorRequirement, error) { + requirements, getLabelSelectorErr := attr.GetLabelSelector() + if len(requirements) == 0 { + return nil, getLabelSelectorErr + } + + retRequirements := []metav1.LabelSelectorRequirement{} + for _, requirement := range requirements { + retRequirement := metav1.LabelSelectorRequirement{ + Key: requirement.Key(), + } + if values := requirement.ValuesUnsorted(); len(values) > 0 { + retRequirement.Values = values + } + switch requirement.Operator() { + case selection.Equals, selection.DoubleEquals, selection.In: + retRequirement.Operator = metav1.LabelSelectorOpIn + case selection.NotEquals, selection.NotIn: + retRequirement.Operator = metav1.LabelSelectorOpNotIn + case selection.Exists: + retRequirement.Operator = metav1.LabelSelectorOpExists + case selection.DoesNotExist: + retRequirement.Operator = metav1.LabelSelectorOpDoesNotExist + default: + // ignore this particular requirement. since requirements are AND'd, it is safe to ignore unknown requirements + // for authorization since the resulting check will only be as broad or broader than the intended. + continue + } + retRequirements = append(retRequirements, retRequirement) + } + + if len(retRequirements) == 0 { + // this means that all requirements were dropped (likely due to unknown operators), so we are checking the broader + // unrestricted action. 
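The translation above consumes parsed label-selector requirements; a small sketch of what those look like before they are mapped to In/NotIn authorization requirements (Requirement.ValuesUnsorted assumes an apimachinery version matching this vendor drop):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Parses into two requirements that the translation above would map to
	// LabelSelectorOpIn and LabelSelectorOpNotIn respectively.
	sel, err := labels.Parse("env in (prod,dev),tier!=frontend")
	if err != nil {
		panic(err)
	}
	reqs, _ := sel.Requirements()
	for _, r := range reqs {
		fmt.Println(r.Key(), r.Operator(), r.ValuesUnsorted())
	}
}
```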
+ return nil, getLabelSelectorErr + } + return retRequirements, getLabelSelectorErr +} + +// TODO: need to finish the method to get the rules when using webhook mode +func (w *WebhookAuthorizer) RulesFor(user user.Info, namespace string) ([]authorizer.ResourceRuleInfo, []authorizer.NonResourceRuleInfo, bool, error) { + var ( + resourceRules []authorizer.ResourceRuleInfo + nonResourceRules []authorizer.NonResourceRuleInfo + ) + incomplete := true + return resourceRules, nonResourceRules, incomplete, fmt.Errorf("webhook authorizer does not support user rule resolution") +} + +// Match is used to evaluate the SubjectAccessReviewSpec against +// the authorizer's matchConditions in the form of cel expressions +// to return match or no match found, which then is used to +// determine if the webhook should be skipped. +func (w *WebhookAuthorizer) match(ctx context.Context, r *authorizationv1.SubjectAccessReview) (bool, error) { + // A nil celMatcher or zero saved CompilationResults matches all requests. + if w.celMatcher == nil || w.celMatcher.CompilationResults == nil { + return true, nil + } + return w.celMatcher.Eval(ctx, r) +} + +func convertToSARExtra(extra map[string][]string) map[string]authorizationv1.ExtraValue { + if extra == nil { + return nil + } + ret := map[string]authorizationv1.ExtraValue{} + for k, v := range extra { + ret[k] = authorizationv1.ExtraValue(v) + } + + return ret +} + +// subjectAccessReviewInterfaceFromConfig builds a client from the specified kubeconfig file, +// and returns a SubjectAccessReviewInterface that uses that client. Note that the client submits SubjectAccessReview +// requests to the exact path specified in the kubeconfig file, so arbitrary non-API servers can be targeted. +func subjectAccessReviewInterfaceFromConfig(config *rest.Config, version string, retryBackoff wait.Backoff) (subjectAccessReviewer, error) { + localScheme := runtime.NewScheme() + if err := scheme.AddToScheme(localScheme); err != nil { + return nil, err + } + + switch version { + case authorizationv1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authorizationv1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, config, groupVersions, retryBackoff) + if err != nil { + return nil, err + } + return &subjectAccessReviewV1ClientGW{gw.RestClient}, nil + + case authorizationv1beta1.SchemeGroupVersion.Version: + groupVersions := []schema.GroupVersion{authorizationv1beta1.SchemeGroupVersion} + if err := localScheme.SetVersionPriority(groupVersions...); err != nil { + return nil, err + } + gw, err := webhook.NewGenericWebhook(localScheme, scheme.Codecs, config, groupVersions, retryBackoff) + if err != nil { + return nil, err + } + return &subjectAccessReviewV1beta1ClientGW{gw.RestClient}, nil + + default: + return nil, fmt.Errorf( + "unsupported webhook authorizer version %q, supported versions are %q, %q", + version, + authorizationv1.SchemeGroupVersion.Version, + authorizationv1beta1.SchemeGroupVersion.Version, + ) + } +} + +type subjectAccessReviewV1Client struct { + client rest.Interface +} + +func (t *subjectAccessReviewV1Client) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, opts metav1.CreateOptions) (result *authorizationv1.SubjectAccessReview, statusCode int, err error) { + result = &authorizationv1.SubjectAccessReview{} + + restResult := t.client.Post(). + Resource("subjectaccessreviews"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Body(subjectAccessReview). + Do(ctx) + + restResult.StatusCode(&statusCode) + err = restResult.Into(result) + return +} + +// subjectAccessReviewV1ClientGW used by the generic webhook, doesn't specify GVR. +type subjectAccessReviewV1ClientGW struct { + client rest.Interface +} + +func (t *subjectAccessReviewV1ClientGW) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, _ metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, int, error) { + var statusCode int + result := &authorizationv1.SubjectAccessReview{} + + restResult := t.client.Post().Body(subjectAccessReview).Do(ctx) + + restResult.StatusCode(&statusCode) + err := restResult.Into(result) + + return result, statusCode, err +} + +// subjectAccessReviewV1beta1ClientGW used by the generic webhook, doesn't specify GVR. +type subjectAccessReviewV1beta1ClientGW struct { + client rest.Interface +} + +func (t *subjectAccessReviewV1beta1ClientGW) Create(ctx context.Context, subjectAccessReview *authorizationv1.SubjectAccessReview, _ metav1.CreateOptions) (*authorizationv1.SubjectAccessReview, int, error) { + var statusCode int + v1beta1Review := &authorizationv1beta1.SubjectAccessReview{Spec: v1SpecToV1beta1Spec(&subjectAccessReview.Spec)} + v1beta1Result := &authorizationv1beta1.SubjectAccessReview{} + + restResult := t.client.Post().Body(v1beta1Review).Do(ctx) + + restResult.StatusCode(&statusCode) + err := restResult.Into(v1beta1Result) + if err == nil { + subjectAccessReview.Status = v1beta1StatusToV1Status(&v1beta1Result.Status) + } + return subjectAccessReview, statusCode, err +} + +// shouldCache determines whether it is safe to cache the given request attributes. If the +// requester-controlled attributes are too large, this may be a DoS attempt, so we skip the cache. 
+func shouldCache(attr authorizer.Attributes) bool { + controlledAttrSize := int64(len(attr.GetNamespace())) + + int64(len(attr.GetVerb())) + + int64(len(attr.GetAPIGroup())) + + int64(len(attr.GetAPIVersion())) + + int64(len(attr.GetResource())) + + int64(len(attr.GetSubresource())) + + int64(len(attr.GetName())) + + int64(len(attr.GetPath())) + return controlledAttrSize < maxControlledAttrCacheSize +} + +func v1beta1StatusToV1Status(in *authorizationv1beta1.SubjectAccessReviewStatus) authorizationv1.SubjectAccessReviewStatus { + return authorizationv1.SubjectAccessReviewStatus{ + Allowed: in.Allowed, + Denied: in.Denied, + Reason: in.Reason, + EvaluationError: in.EvaluationError, + } +} + +func v1SpecToV1beta1Spec(in *authorizationv1.SubjectAccessReviewSpec) authorizationv1beta1.SubjectAccessReviewSpec { + return authorizationv1beta1.SubjectAccessReviewSpec{ + ResourceAttributes: v1ResourceAttributesToV1beta1ResourceAttributes(in.ResourceAttributes), + NonResourceAttributes: v1NonResourceAttributesToV1beta1NonResourceAttributes(in.NonResourceAttributes), + User: in.User, + Groups: in.Groups, + Extra: v1ExtraToV1beta1Extra(in.Extra), + UID: in.UID, + } +} + +func v1ResourceAttributesToV1beta1ResourceAttributes(in *authorizationv1.ResourceAttributes) *authorizationv1beta1.ResourceAttributes { + if in == nil { + return nil + } + return &authorizationv1beta1.ResourceAttributes{ + Namespace: in.Namespace, + Verb: in.Verb, + Group: in.Group, + Version: in.Version, + Resource: in.Resource, + Subresource: in.Subresource, + Name: in.Name, + FieldSelector: in.FieldSelector, + LabelSelector: in.LabelSelector, + } +} + +func v1NonResourceAttributesToV1beta1NonResourceAttributes(in *authorizationv1.NonResourceAttributes) *authorizationv1beta1.NonResourceAttributes { + if in == nil { + return nil + } + return &authorizationv1beta1.NonResourceAttributes{ + Path: in.Path, + Verb: in.Verb, + } +} + +func v1ExtraToV1beta1Extra(in map[string]authorizationv1.ExtraValue) map[string]authorizationv1beta1.ExtraValue { + if in == nil { + return nil + } + ret := make(map[string]authorizationv1beta1.ExtraValue, len(in)) + for k, v := range in { + ret[k] = authorizationv1beta1.ExtraValue(v) + } + return ret +} diff --git a/vendor/k8s.io/client-go/tools/events/OWNERS b/vendor/k8s.io/client-go/tools/events/OWNERS new file mode 100644 index 00000000..8c1137f4 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-instrumentation-approvers + - wojtek-t +reviewers: + - sig-instrumentation-reviewers + - wojtek-t +emeritus_approvers: + - yastij diff --git a/vendor/k8s.io/client-go/tools/events/doc.go b/vendor/k8s.io/client-go/tools/events/doc.go new file mode 100644 index 00000000..8c48f607 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package events has all client logic for recording and reporting
+// "k8s.io/api/events/v1".Event events.
+package events // import "k8s.io/client-go/tools/events"
diff --git a/vendor/k8s.io/client-go/tools/events/event_broadcaster.go b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go
new file mode 100644
index 00000000..94c2012b
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/events/event_broadcaster.go
@@ -0,0 +1,457 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package events
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	eventsv1 "k8s.io/api/events/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/json"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	typedv1core "k8s.io/client-go/kubernetes/typed/core/v1"
+	typedeventsv1 "k8s.io/client-go/kubernetes/typed/events/v1"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/client-go/tools/record/util"
+	"k8s.io/klog/v2"
+	"k8s.io/utils/clock"
+)
+
+const (
+	maxTriesPerEvent = 12
+	finishTime       = 6 * time.Minute
+	refreshTime      = 30 * time.Minute
+	maxQueuedEvents  = 1000
+)
+
+var defaultSleepDuration = 10 * time.Second
+
+// TODO: validate impact of copying and investigate hashing
+type eventKey struct {
+	eventType           string
+	action              string
+	reason              string
+	reportingController string
+	reportingInstance   string
+	regarding           corev1.ObjectReference
+	related             corev1.ObjectReference
+}
+
+type eventBroadcasterImpl struct {
+	*watch.Broadcaster
+	mu            sync.Mutex
+	eventCache    map[eventKey]*eventsv1.Event
+	sleepDuration time.Duration
+	sink          EventSink
+}
+
+// EventSinkImpl wraps EventsV1Interface to implement EventSink.
+// TODO: this makes it easier for testing purposes and masks the logic of performing API calls.
+// Note that rolling back to the raw clientset should also be transparent.
+type EventSinkImpl struct {
+	Interface typedeventsv1.EventsV1Interface
+}
+
+// Create takes the representation of an event and creates it. Returns the server's representation of the event, and an error, if there is any.
+func (e *EventSinkImpl) Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
+	if event.Namespace == "" {
+		return nil, fmt.Errorf("can't create an event with empty namespace")
+	}
+	return e.Interface.Events(event.Namespace).Create(ctx, event, metav1.CreateOptions{})
+}
+
+// Update takes the representation of an event and updates it. Returns the server's representation of the event, and an error, if there is any.
+func (e *EventSinkImpl) Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error) {
+	if event.Namespace == "" {
+		return nil, fmt.Errorf("can't update an event with empty namespace")
+	}
+	return e.Interface.Events(event.Namespace).Update(ctx, event, metav1.UpdateOptions{})
+}
+
+// Patch applies the patch and returns the patched event, and an error, if there is any.
+func (e *EventSinkImpl) Patch(ctx context.Context, event *eventsv1.Event, data []byte) (*eventsv1.Event, error) {
+	if event.Namespace == "" {
+		return nil, fmt.Errorf("can't patch an event with empty namespace")
+	}
+	return e.Interface.Events(event.Namespace).Patch(ctx, event.Name, types.StrategicMergePatchType, data, metav1.PatchOptions{})
+}
+
+// NewBroadcaster creates a new event broadcaster.
+func NewBroadcaster(sink EventSink) EventBroadcaster {
+	return newBroadcaster(sink, defaultSleepDuration, map[eventKey]*eventsv1.Event{})
+}
+
+// newBroadcaster creates a new event broadcaster for test purposes.
+func newBroadcaster(sink EventSink, sleepDuration time.Duration, eventCache map[eventKey]*eventsv1.Event) EventBroadcaster {
+	return &eventBroadcasterImpl{
+		Broadcaster:   watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull),
+		eventCache:    eventCache,
+		sleepDuration: sleepDuration,
+		sink:          sink,
+	}
+}
+
+func (e *eventBroadcasterImpl) Shutdown() {
+	e.Broadcaster.Shutdown()
+}
+
+// refreshExistingEventSeries refreshes the TTL of existing series events by re-recording them.
+func (e *eventBroadcasterImpl) refreshExistingEventSeries(ctx context.Context) {
+	// TODO: Investigate whether lock contention won't be a problem
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	for isomorphicKey, event := range e.eventCache {
+		if event.Series != nil {
+			if recordedEvent, retry := recordEvent(ctx, e.sink, event); !retry {
+				if recordedEvent != nil {
+					e.eventCache[isomorphicKey] = recordedEvent
+				}
+			}
+		}
+	}
+}
+
+// finishSeries checks if a series has ended and either:
+// - writes the final count to the apiserver
+// - deletes a singleton event (i.e. series field is nil) from the cache
+func (e *eventBroadcasterImpl) finishSeries(ctx context.Context) {
+	// TODO: Investigate whether lock contention won't be a problem
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	for isomorphicKey, event := range e.eventCache {
+		series := event.Series
+		if series != nil {
+			if series.LastObservedTime.Time.Before(time.Now().Add(-finishTime)) {
+				if _, retry := recordEvent(ctx, e.sink, event); !retry {
+					delete(e.eventCache, isomorphicKey)
+				}
+			}
+		} else if event.EventTime.Time.Before(time.Now().Add(-finishTime)) {
+			delete(e.eventCache, isomorphicKey)
+		}
+	}
+}
+
+// NewRecorder returns an EventRecorder that records events with the given event source.
+func (e *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger {
+	hostname, _ := os.Hostname()
+	reportingInstance := reportingController + "-" + hostname
+	return &recorderImplLogger{recorderImpl: &recorderImpl{scheme, reportingController, reportingInstance, e.Broadcaster, clock.RealClock{}}, logger: klog.Background()}
+}
+
+func (e *eventBroadcasterImpl) recordToSink(ctx context.Context, event *eventsv1.Event, clock clock.Clock) {
+	// Make a copy before modification, because there could be multiple listeners.
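+	// Isomorphic events (same key fields, see getKey) are aggregated in the
+	// cache into a single Event carrying a Series count rather than being
+	// written to the API server one by one.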
+	eventCopy := event.DeepCopy()
+	go func() {
+		evToRecord := func() *eventsv1.Event {
+			e.mu.Lock()
+			defer e.mu.Unlock()
+			eventKey := getKey(eventCopy)
+			isomorphicEvent, isIsomorphic := e.eventCache[eventKey]
+			if isIsomorphic {
+				if isomorphicEvent.Series != nil {
+					isomorphicEvent.Series.Count++
+					isomorphicEvent.Series.LastObservedTime = metav1.MicroTime{Time: clock.Now()}
+					return nil
+				}
+				isomorphicEvent.Series = &eventsv1.EventSeries{
+					Count:            2,
+					LastObservedTime: metav1.MicroTime{Time: clock.Now()},
+				}
+				// Make a copy of the Event to make sure that recording it
+				// doesn't mess with the object stored in cache.
+				return isomorphicEvent.DeepCopy()
+			}
+			e.eventCache[eventKey] = eventCopy
+			// Make a copy of the Event to make sure that recording it doesn't
+			// mess with the object stored in cache.
+			return eventCopy.DeepCopy()
+		}()
+		if evToRecord != nil {
+			// TODO: Add a metric counting the number of recording attempts
+			e.attemptRecording(ctx, evToRecord)
+			// We don't want the new recorded Event to be reflected in the
+			// client's cache because server-side mutations could mess with the
+			// aggregation mechanism used by the client.
+		}
+	}()
+}
+
+func (e *eventBroadcasterImpl) attemptRecording(ctx context.Context, event *eventsv1.Event) {
+	tries := 0
+	for {
+		if _, retry := recordEvent(ctx, e.sink, event); !retry {
+			return
+		}
+		tries++
+		if tries >= maxTriesPerEvent {
+			klog.FromContext(ctx).Error(nil, "Unable to write event (retry limit exceeded!)", "event", event)
+			return
+		}
+		// Randomize sleep so that various clients won't all be
+		// synced up if the master goes down. Give up when
+		// the context is canceled.
+		select {
+		case <-ctx.Done():
+			return
+		case <-time.After(wait.Jitter(e.sleepDuration, 0.25)):
+		}
+	}
+}
+
+func recordEvent(ctx context.Context, sink EventSink, event *eventsv1.Event) (*eventsv1.Event, bool) {
+	var newEvent *eventsv1.Event
+	var err error
+	isEventSeries := event.Series != nil
+	if isEventSeries {
+		patch, patchBytesErr := createPatchBytesForSeries(event)
+		if patchBytesErr != nil {
+			klog.FromContext(ctx).Error(patchBytesErr, "Unable to calculate diff, no merge is possible")
+			return nil, false
+		}
+		newEvent, err = sink.Patch(ctx, event, patch)
+	}
+	// Update can fail because the event may have been removed and it no longer exists.
+	if !isEventSeries || (isEventSeries && util.IsKeyNotFoundError(err)) {
+		// Making sure that ResourceVersion is empty on creation
+		event.ResourceVersion = ""
+		newEvent, err = sink.Create(ctx, event)
+	}
+	if err == nil {
+		return newEvent, false
+	}
+	// If we can't contact the server, then hold everything while we keep trying.
+	// Otherwise, something about the event is malformed and we should abandon it.
+	switch err.(type) {
+	case *restclient.RequestConstructionError:
+		// We will construct the request the same next time, so don't keep trying.
+		klog.FromContext(ctx).Error(err, "Unable to construct event (will not retry!)", "event", event)
+		return nil, false
+	case *errors.StatusError:
+		if errors.IsAlreadyExists(err) {
+			// If we tried to create an Event from an EventSeries, it means that
+			// the original Patch request failed because the Event we were
+			// trying to patch didn't exist. If the creation failed because the
+			// Event now exists, it is safe to retry. This occurs when a new
+			// Event is emitted twice in a very short period of time.
+ if isEventSeries { + return nil, true + } + klog.FromContext(ctx).V(5).Info("Server rejected event (will not retry!)", "event", event, "err", err) + } else { + klog.FromContext(ctx).Error(err, "Server rejected event (will not retry!)", "event", event) + } + return nil, false + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + klog.FromContext(ctx).Error(err, "Unable to write event (may retry after sleeping)") + return nil, true +} + +func createPatchBytesForSeries(event *eventsv1.Event) ([]byte, error) { + oldEvent := event.DeepCopy() + oldEvent.Series = nil + oldData, err := json.Marshal(oldEvent) + if err != nil { + return nil, err + } + newData, err := json.Marshal(event) + if err != nil { + return nil, err + } + return strategicpatch.CreateTwoWayMergePatch(oldData, newData, eventsv1.Event{}) +} + +func getKey(event *eventsv1.Event) eventKey { + key := eventKey{ + eventType: event.Type, + action: event.Action, + reason: event.Reason, + reportingController: event.ReportingController, + reportingInstance: event.ReportingInstance, + regarding: event.Regarding, + } + if event.Related != nil { + key.related = *event.Related + } + return key +} + +// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured logging function. +// The return value can be ignored or used to stop recording, if desired. +// TODO: this function should also return an error. +// +// Deprecated: use StartLogging instead. +func (e *eventBroadcasterImpl) StartStructuredLogging(verbosity klog.Level) func() { + logger := klog.Background().V(int(verbosity)) + stopWatcher, err := e.StartLogging(logger) + if err != nil { + logger.Error(err, "Failed to start event watcher") + return func() {} + } + return stopWatcher +} + +// StartLogging starts sending events received from this EventBroadcaster to the structured logger. +// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`). +// The returned function can be ignored or used to stop recording, if desired. +func (e *eventBroadcasterImpl) StartLogging(logger klog.Logger) (func(), error) { + return e.StartEventWatcher( + func(obj runtime.Object) { + event, ok := obj.(*eventsv1.Event) + if !ok { + logger.Error(nil, "unexpected type, expected eventsv1.Event") + return + } + logger.Info("Event occurred", "object", klog.KRef(event.Regarding.Namespace, event.Regarding.Name), "kind", event.Regarding.Kind, "apiVersion", event.Regarding.APIVersion, "type", event.Type, "reason", event.Reason, "action", event.Action, "note", event.Note) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. 
+// The return value is used to stop recording.
+func (e *eventBroadcasterImpl) StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error) {
+	watcher, err := e.Watch()
+	if err != nil {
+		return nil, err
+	}
+	go func() {
+		defer utilruntime.HandleCrash()
+		for {
+			watchEvent, ok := <-watcher.ResultChan()
+			if !ok {
+				return
+			}
+			eventHandler(watchEvent.Object)
+		}
+	}()
+	return watcher.Stop, nil
+}
+
+func (e *eventBroadcasterImpl) startRecordingEvents(ctx context.Context) error {
+	eventHandler := func(obj runtime.Object) {
+		event, ok := obj.(*eventsv1.Event)
+		if !ok {
+			klog.FromContext(ctx).Error(nil, "unexpected type, expected eventsv1.Event")
+			return
+		}
+		e.recordToSink(ctx, event, clock.RealClock{})
+	}
+	stopWatcher, err := e.StartEventWatcher(eventHandler)
+	if err != nil {
+		return err
+	}
+	go func() {
+		<-ctx.Done()
+		stopWatcher()
+	}()
+	return nil
+}
+
+// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink.
+// Deprecated: use StartRecordingToSinkWithContext instead.
+func (e *eventBroadcasterImpl) StartRecordingToSink(stopCh <-chan struct{}) {
+	err := e.StartRecordingToSinkWithContext(wait.ContextForChannel(stopCh))
+	if err != nil {
+		klog.Background().Error(err, "Failed to start recording to sink")
+	}
+}
+
+// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster to the given sink.
+func (e *eventBroadcasterImpl) StartRecordingToSinkWithContext(ctx context.Context) error {
+	go wait.UntilWithContext(ctx, e.refreshExistingEventSeries, refreshTime)
+	go wait.UntilWithContext(ctx, e.finishSeries, finishTime)
+	return e.startRecordingEvents(ctx)
+}
+
+type eventBroadcasterAdapterImpl struct {
+	coreClient          typedv1core.EventsGetter
+	coreBroadcaster     record.EventBroadcaster
+	eventsv1Client      typedeventsv1.EventsV1Interface
+	eventsv1Broadcaster EventBroadcaster
+}
+
+// NewEventBroadcasterAdapter creates a wrapper around new and legacy broadcasters to simplify
+// migration of individual components to the new Event API.
+//
+//logcheck:context // NewEventBroadcasterAdapterWithContext should be used instead because record.NewBroadcaster is called and works better when a context is supplied (contextual logging, cancellation).
+func NewEventBroadcasterAdapter(client clientset.Interface) EventBroadcasterAdapter {
+	return NewEventBroadcasterAdapterWithContext(context.Background(), client)
+}
+
+// NewEventBroadcasterAdapterWithContext creates a wrapper around new and legacy broadcasters to simplify
+// migration of individual components to the new Event API.
+func NewEventBroadcasterAdapterWithContext(ctx context.Context, client clientset.Interface) EventBroadcasterAdapter {
+	eventClient := &eventBroadcasterAdapterImpl{}
+	if _, err := client.Discovery().ServerResourcesForGroupVersion(eventsv1.SchemeGroupVersion.String()); err == nil {
+		eventClient.eventsv1Client = client.EventsV1()
+		eventClient.eventsv1Broadcaster = NewBroadcaster(&EventSinkImpl{Interface: eventClient.eventsv1Client})
+	}
+	// Even though there can soon exist cases when coreBroadcaster won't really be needed,
+	// we create it unconditionally because its overhead is minor and it simplifies the usage
+	// patterns of this library in all components.
+ eventClient.coreClient = client.CoreV1() + eventClient.coreBroadcaster = record.NewBroadcaster(record.WithContext(ctx)) + return eventClient +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +func (e *eventBroadcasterAdapterImpl) StartRecordingToSink(stopCh <-chan struct{}) { + if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { + e.eventsv1Broadcaster.StartRecordingToSink(stopCh) + } + if e.coreBroadcaster != nil && e.coreClient != nil { + e.coreBroadcaster.StartRecordingToSink(&typedv1core.EventSinkImpl{Interface: e.coreClient.Events("")}) + } +} + +func (e *eventBroadcasterAdapterImpl) NewRecorder(name string) EventRecorderLogger { + if e.eventsv1Broadcaster != nil && e.eventsv1Client != nil { + return e.eventsv1Broadcaster.NewRecorder(scheme.Scheme, name) + } + return record.NewEventRecorderAdapter(e.DeprecatedNewLegacyRecorder(name)) +} + +func (e *eventBroadcasterAdapterImpl) DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger { + return e.coreBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: name}) +} + +func (e *eventBroadcasterAdapterImpl) Shutdown() { + if e.coreBroadcaster != nil { + e.coreBroadcaster.Shutdown() + } + if e.eventsv1Broadcaster != nil { + e.eventsv1Broadcaster.Shutdown() + } +} diff --git a/vendor/k8s.io/client-go/tools/events/event_recorder.go b/vendor/k8s.io/client-go/tools/events/event_recorder.go new file mode 100644 index 00000000..65431788 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/event_recorder.go @@ -0,0 +1,113 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/record/util" + "k8s.io/client-go/tools/reference" + "k8s.io/klog/v2" + "k8s.io/utils/clock" +) + +type recorderImpl struct { + scheme *runtime.Scheme + reportingController string + reportingInstance string + *watch.Broadcaster + clock clock.Clock +} + +var _ EventRecorder = &recorderImpl{} + +func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + recorder.eventf(klog.Background(), regarding, related, eventtype, reason, action, note, args...) +} + +type recorderImplLogger struct { + *recorderImpl + logger klog.Logger +} + +var _ EventRecorderLogger = &recorderImplLogger{} + +func (recorder *recorderImplLogger) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + recorder.eventf(recorder.logger, regarding, related, eventtype, reason, action, note, args...) 
+}
+
+func (recorder *recorderImplLogger) WithLogger(logger klog.Logger) EventRecorderLogger {
+	return &recorderImplLogger{recorderImpl: recorder.recorderImpl, logger: logger}
+}
+
+func (recorder *recorderImpl) eventf(logger klog.Logger, regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
+	timestamp := metav1.MicroTime{Time: time.Now()}
+	message := fmt.Sprintf(note, args...)
+	refRegarding, err := reference.GetReference(recorder.scheme, regarding)
+	if err != nil {
+		logger.Error(err, "Could not construct reference, will not report event", "object", regarding, "eventType", eventtype, "reason", reason, "message", message)
+		return
+	}
+
+	var refRelated *v1.ObjectReference
+	if related != nil {
+		refRelated, err = reference.GetReference(recorder.scheme, related)
+		if err != nil {
+			logger.V(9).Info("Could not construct reference", "object", related, "err", err)
+		}
+	}
+	if !util.ValidateEventType(eventtype) {
+		logger.Error(nil, "Unsupported event type", "eventType", eventtype)
+		return
+	}
+	event := recorder.makeEvent(refRegarding, refRelated, timestamp, eventtype, reason, message, recorder.reportingController, recorder.reportingInstance, action)
+	go func() {
+		defer utilruntime.HandleCrash()
+		recorder.Action(watch.Added, event)
+	}()
+}
+
+func (recorder *recorderImpl) makeEvent(refRegarding *v1.ObjectReference, refRelated *v1.ObjectReference, timestamp metav1.MicroTime, eventtype, reason, message string, reportingController string, reportingInstance string, action string) *eventsv1.Event {
+	t := metav1.Time{Time: recorder.clock.Now()}
+	namespace := refRegarding.Namespace
+	if namespace == "" {
+		namespace = metav1.NamespaceDefault
+	}
+	return &eventsv1.Event{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%v.%x", refRegarding.Name, t.UnixNano()),
+			Namespace: namespace,
+		},
+		EventTime:           timestamp,
+		Series:              nil,
+		ReportingController: reportingController,
+		ReportingInstance:   reportingInstance,
+		Action:              action,
+		Reason:              reason,
+		Regarding:           *refRegarding,
+		Related:             refRelated,
+		Note:                message,
+		Type:                eventtype,
+	}
+}
diff --git a/vendor/k8s.io/client-go/tools/events/fake.go b/vendor/k8s.io/client-go/tools/events/fake.go
new file mode 100644
index 00000000..e26826d6
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/events/fake.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package events
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/klog/v2"
+)
+
+// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
+// when created manually and not by NewFakeRecorder; however, all events may be
+// thrown away in this case.
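+//
+// Illustrative test usage (the recorded object "pod" is assumed to exist):
+//
+//	rec := NewFakeRecorder(10)
+//	rec.Eventf(pod, nil, "Normal", "Created", "Create", "created pod %s", "web-0")
+//	msg := <-rec.Events // "Normal Created created pod web-0"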
+type FakeRecorder struct { + Events chan string +} + +var _ EventRecorderLogger = &FakeRecorder{} + +// Eventf emits an event +func (f *FakeRecorder) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+note, args...) + } +} + +func (f *FakeRecorder) WithLogger(logger klog.Logger) EventRecorderLogger { + return f +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/client-go/tools/events/helper.go b/vendor/k8s.io/client-go/tools/events/helper.go new file mode 100644 index 00000000..dfc57af4 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/helper.go @@ -0,0 +1,64 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package events + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + eventsv1 "k8s.io/api/events/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +var mapping = map[schema.GroupVersion]string{ + eventsv1.SchemeGroupVersion: "regarding", + eventsv1beta1.SchemeGroupVersion: "regarding", + corev1.SchemeGroupVersion: "involvedObject", +} + +// GetFieldSelector returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. +func GetFieldSelector(eventsGroupVersion schema.GroupVersion, regardingGroupVersionKind schema.GroupVersionKind, regardingName string, regardingUID types.UID) (fields.Selector, error) { + field := fields.Set{} + + if _, ok := mapping[eventsGroupVersion]; !ok { + return nil, fmt.Errorf("unknown version %v", eventsGroupVersion) + } + prefix := mapping[eventsGroupVersion] + + if len(regardingName) > 0 { + field[prefix+".name"] = regardingName + } + + if len(regardingGroupVersionKind.Kind) > 0 { + field[prefix+".kind"] = regardingGroupVersionKind.Kind + } + + regardingGroupVersion := regardingGroupVersionKind.GroupVersion() + if !regardingGroupVersion.Empty() { + field[prefix+".apiVersion"] = regardingGroupVersion.String() + } + + if len(regardingUID) > 0 { + field[prefix+".uid"] = string(regardingUID) + } + + return field.AsSelector(), nil +} diff --git a/vendor/k8s.io/client-go/tools/events/interfaces.go b/vendor/k8s.io/client-go/tools/events/interfaces.go new file mode 100644 index 00000000..bb6109f6 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/events/interfaces.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package events
+
+import (
+	"context"
+
+	eventsv1 "k8s.io/api/events/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	internalevents "k8s.io/client-go/tools/internal/events"
+	"k8s.io/client-go/tools/record"
+	"k8s.io/klog/v2"
+)
+
+type EventRecorder = internalevents.EventRecorder
+type EventRecorderLogger = internalevents.EventRecorderLogger
+
+// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log.
+type EventBroadcaster interface {
+	// StartRecordingToSink starts sending events received from the specified eventBroadcaster.
+	// Deprecated: use StartRecordingToSinkWithContext instead.
+	StartRecordingToSink(stopCh <-chan struct{})
+
+	// StartRecordingToSinkWithContext starts sending events received from the specified eventBroadcaster.
+	StartRecordingToSinkWithContext(ctx context.Context) error
+
+	// NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster
+	// with the event source set to the given event source.
+	NewRecorder(scheme *runtime.Scheme, reportingController string) EventRecorderLogger
+
+	// StartEventWatcher enables you to watch for emitted events without usage
+	// of StartRecordingToSink. This lets you also process events in a custom way (e.g. in tests).
+	// NOTE: events received on your eventHandler should be copied before being used.
+	// TODO: figure out if this can be removed.
+	StartEventWatcher(eventHandler func(event runtime.Object)) (func(), error)
+
+	// StartStructuredLogging starts sending events received from this EventBroadcaster to the structured
+	// logging function. The return value can be ignored or used to stop recording, if desired.
+	// Deprecated: use StartLogging instead.
+	StartStructuredLogging(verbosity klog.Level) func()
+
+	// StartLogging starts sending events received from this EventBroadcaster to the structured logger.
+	// To adjust verbosity, use the logger's V method (i.e. pass `logger.V(3)` instead of `logger`).
+	// The returned function can be ignored or used to stop recording, if desired.
+	StartLogging(logger klog.Logger) (func(), error)
+
+	// Shutdown shuts down the broadcaster.
+	Shutdown()
+}
+
+// EventSink knows how to store events (client-go implements it).
+// EventSink must respect the namespace that will be embedded in 'event'.
+// It is assumed that EventSink will return the same sorts of errors as
+// client-go's REST client.
+type EventSink interface {
+	Create(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
+	Update(ctx context.Context, event *eventsv1.Event) (*eventsv1.Event, error)
+	Patch(ctx context.Context, oldEvent *eventsv1.Event, data []byte) (*eventsv1.Event, error)
+}
+
+// EventBroadcasterAdapter is an auxiliary interface to simplify migration to
+// the new events API. It is a wrapper around new and legacy broadcasters
+// that smartly chooses which one to use.
+//
+// Deprecated: This interface will be removed once migration is completed.
+type EventBroadcasterAdapter interface {
+	// StartRecordingToSink starts sending events received from the specified eventBroadcaster.
+ StartRecordingToSink(stopCh <-chan struct{}) + + // NewRecorder creates a new Event Recorder with specified name. + NewRecorder(name string) EventRecorderLogger + + // DeprecatedNewLegacyRecorder creates a legacy Event Recorder with specific name. + DeprecatedNewLegacyRecorder(name string) record.EventRecorderLogger + + // Shutdown shuts down the broadcaster. + Shutdown() +} diff --git a/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go new file mode 100644 index 00000000..11adc268 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/ciphersuites_flag.go @@ -0,0 +1,147 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "crypto/tls" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + // ciphers maps strings into tls package cipher constants in + // https://golang.org/pkg/crypto/tls/#pkg-constants + ciphers = map[string]uint16{} + insecureCiphers = map[string]uint16{} +) + +func init() { + for _, suite := range tls.CipherSuites() { + ciphers[suite.Name] = suite.ID + } + // keep legacy names for backward compatibility + ciphers["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + ciphers["TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + + for _, suite := range tls.InsecureCipherSuites() { + insecureCiphers[suite.Name] = suite.ID + } +} + +// InsecureTLSCiphers returns the cipher suites implemented by crypto/tls which have +// security issues. +func InsecureTLSCiphers() map[string]uint16 { + cipherKeys := make(map[string]uint16, len(insecureCiphers)) + for k, v := range insecureCiphers { + cipherKeys[k] = v + } + return cipherKeys +} + +// InsecureTLSCipherNames returns a list of cipher suite names implemented by crypto/tls +// which have security issues. +func InsecureTLSCipherNames() []string { + cipherKeys := sets.NewString() + for key := range insecureCiphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +// PreferredTLSCipherNames returns a list of cipher suite names implemented by crypto/tls. +func PreferredTLSCipherNames() []string { + cipherKeys := sets.NewString() + for key := range ciphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +func allCiphers() map[string]uint16 { + acceptedCiphers := make(map[string]uint16, len(ciphers)+len(insecureCiphers)) + for k, v := range ciphers { + acceptedCiphers[k] = v + } + for k, v := range insecureCiphers { + acceptedCiphers[k] = v + } + return acceptedCiphers +} + +// TLSCipherPossibleValues returns all acceptable cipher suite names. +// This is a combination of both InsecureTLSCipherNames() and PreferredTLSCipherNames(). 
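+//
+// A wiring sketch (illustrative; fs and cipherNames are the caller's):
+//
+//	fs.StringSliceVar(&cipherNames, "tls-cipher-suites", nil,
+//		"Comma-separated list of cipher suites for the server. Possible values: "+strings.Join(TLSCipherPossibleValues(), ","))
+//	suites, err := TLSCipherSuites(cipherNames)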
+func TLSCipherPossibleValues() []string { + cipherKeys := sets.NewString() + acceptedCiphers := allCiphers() + for key := range acceptedCiphers { + cipherKeys.Insert(key) + } + return cipherKeys.List() +} + +// TLSCipherSuites returns a list of cipher suite IDs from the cipher suite names passed. +func TLSCipherSuites(cipherNames []string) ([]uint16, error) { + if len(cipherNames) == 0 { + return nil, nil + } + ciphersIntSlice := make([]uint16, 0) + possibleCiphers := allCiphers() + for _, cipher := range cipherNames { + intValue, ok := possibleCiphers[cipher] + if !ok { + return nil, fmt.Errorf("Cipher suite %s not supported or doesn't exist", cipher) + } + ciphersIntSlice = append(ciphersIntSlice, intValue) + } + return ciphersIntSlice, nil +} + +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSPossibleVersions returns all acceptable values for TLS Version. +func TLSPossibleVersions() []string { + versionsKeys := sets.NewString() + for key := range versions { + versionsKeys.Insert(key) + } + return versionsKeys.List() +} + +// TLSVersion returns the TLS Version ID for the version name passed. +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} + +// DefaultTLSVersion defines the default TLS Version. +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} diff --git a/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go b/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go new file mode 100644 index 00000000..728fa520 --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/colon_separated_multimap_string_string.go @@ -0,0 +1,114 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +// ColonSeparatedMultimapStringString supports setting a map[string][]string from an encoding +// that separates keys from values with ':' and separates key-value pairs with ','. +// A key can be repeated multiple times, in which case the values are appended to a +// slice of strings associated with that key. Items in the list associated with a given +// key will appear in the order provided. +// For example: `a:hello,b:again,c:world,b:beautiful` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +// The first call to Set will clear the map before adding entries; subsequent calls will simply append to the map. 
+// This makes it possible to override default values with a command-line option rather than appending to defaults, +// while still allowing the distribution of key-value pairs across multiple flag invocations. +// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}` +type ColonSeparatedMultimapStringString struct { + Multimap *map[string][]string + initialized bool // set to true after the first Set call + allowDefaultEmptyKey bool +} + +// NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the +// ColonSeparatedMultimapStringString flag parsing shim for that map. +func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparatedMultimapStringString { + return &ColonSeparatedMultimapStringString{Multimap: m} +} + +// NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey takes a pointer to a map[string][]string and returns the +// ColonSeparatedMultimapStringString flag parsing shim for that map. It allows default empty key with no colon in the flag. +func NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(m *map[string][]string) *ColonSeparatedMultimapStringString { + return &ColonSeparatedMultimapStringString{Multimap: m, allowDefaultEmptyKey: true} +} + +// Set implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Set(value string) error { + if m.Multimap == nil { + return fmt.Errorf("no target (nil pointer to map[string][]string)") + } + if !m.initialized || *m.Multimap == nil { + // clear default values, or allocate if no existing map + *m.Multimap = make(map[string][]string) + m.initialized = true + } + for _, pair := range strings.Split(value, ",") { + if len(pair) == 0 { + continue + } + kv := strings.SplitN(pair, ":", 2) + var k, v string + if m.allowDefaultEmptyKey && len(kv) == 1 { + v = strings.TrimSpace(kv[0]) + } else { + if len(kv) != 2 { + return fmt.Errorf("malformed pair, expect string:string") + } + k = strings.TrimSpace(kv[0]) + v = strings.TrimSpace(kv[1]) + } + (*m.Multimap)[k] = append((*m.Multimap)[k], v) + } + return nil +} + +// String implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) String() string { + type kv struct { + k string + v string + } + kvs := make([]kv, 0, len(*m.Multimap)) + for k, vs := range *m.Multimap { + for i := range vs { + kvs = append(kvs, kv{k: k, v: vs[i]}) + } + } + // stable sort by keys, order of values should be preserved + sort.SliceStable(kvs, func(i, j int) bool { + return kvs[i].k < kvs[j].k + }) + pairs := make([]string, 0, len(kvs)) + for i := range kvs { + pairs = append(pairs, fmt.Sprintf("%s:%s", kvs[i].k, kvs[i].v)) + } + return strings.Join(pairs, ",") +} + +// Type implements github.com/spf13/pflag.Value +func (m *ColonSeparatedMultimapStringString) Type() string { + return "colonSeparatedMultimapStringString" +} + +// Empty implements OmitEmpty +func (m *ColonSeparatedMultimapStringString) Empty() bool { + return len(*m.Multimap) == 0 +} diff --git a/vendor/k8s.io/component-base/cli/flag/configuration_map.go b/vendor/k8s.io/component-base/cli/flag/configuration_map.go new file mode 100644 index 00000000..911b05ec --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/configuration_map.go @@ -0,0 +1,53 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "sort" + "strings" +) + +type ConfigurationMap map[string]string + +func (m *ConfigurationMap) String() string { + pairs := []string{} + for k, v := range *m { + pairs = append(pairs, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (m *ConfigurationMap) Set(value string) error { + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + if len(arr) == 2 { + (*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1]) + } else { + (*m)[strings.TrimSpace(arr[0])] = "" + } + } + return nil +} + +func (*ConfigurationMap) Type() string { + return "mapStringString" +} diff --git a/vendor/k8s.io/component-base/cli/flag/flags.go b/vendor/k8s.io/component-base/cli/flag/flags.go new file mode 100644 index 00000000..2388340d --- /dev/null +++ b/vendor/k8s.io/component-base/cli/flag/flags.go @@ -0,0 +1,66 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + goflag "flag" + "strings" + + "github.com/spf13/pflag" + "k8s.io/klog/v2" +) + +var underscoreWarnings = make(map[string]bool) + +// WordSepNormalizeFunc changes all flags that contain "_" separators +func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + return pflag.NormalizedName(strings.Replace(name, "_", "-", -1)) + } + return pflag.NormalizedName(name) +} + +// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators +func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { + if strings.Contains(name, "_") { + nname := strings.Replace(name, "_", "-", -1) + if _, alreadyWarned := underscoreWarnings[name]; !alreadyWarned { + klog.Warningf("using an underscore in a flag name is not supported. 
%s has been converted to %s.", name, nname)
+			underscoreWarnings[name] = true
+		}
+
+		return pflag.NormalizedName(nname)
+	}
+	return pflag.NormalizedName(name)
+}
+
+// InitFlags normalizes, parses, then logs the command line flags
+func InitFlags() {
+	pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc)
+	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	pflag.Parse()
+	pflag.VisitAll(func(flag *pflag.Flag) {
+		klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+	})
+}
+
+// PrintFlags logs the flags in the flagset
+func PrintFlags(flags *pflag.FlagSet) {
+	flags.VisitAll(func(flag *pflag.Flag) {
+		klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
+	})
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go b/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go
new file mode 100644
index 00000000..bf8dbfb9
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/langle_separated_map_string_string.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// LangleSeparatedMapStringString can be set from the command line with the format `--flag "string<string"`.
+type LangleSeparatedMapStringString struct {
+	Map         *map[string]string
+	initialized bool // set to true after the first Set call
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/namedcertkey_flag.go b/vendor/k8s.io/component-base/cli/flag/namedcertkey_flag.go
new file mode 100644
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/namedcertkey_flag.go
+
+// NamedCertKey is a flag value parsing "certfile,keyfile" and
+// "certfile,keyfile:name,name,name".
+type NamedCertKey struct {
+	Names             []string
+	CertFile, KeyFile string
+}
+
+var _ flag.Value = &NamedCertKey{}
+
+func (nkc *NamedCertKey) String() string {
+	s := nkc.CertFile + "," + nkc.KeyFile
+	if len(nkc.Names) > 0 {
+		s = s + ":" + strings.Join(nkc.Names, ",")
+	}
+	return s
+}
+
+func (nkc *NamedCertKey) Set(value string) error {
+	cs := strings.SplitN(value, ":", 2)
+	var keycert string
+	if len(cs) == 2 {
+		var names string
+		keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1])
+		if names == "" {
+			return errors.New("empty names list is not allowed")
+		}
+		nkc.Names = nil
+		for _, name := range strings.Split(names, ",") {
+			nkc.Names = append(nkc.Names, strings.TrimSpace(name))
+		}
+	} else {
+		nkc.Names = nil
+		keycert = strings.TrimSpace(cs[0])
+	}
+	cs = strings.Split(keycert, ",")
+	if len(cs) != 2 {
+		return errors.New("expected comma separated certificate and key file paths")
+	}
+	nkc.CertFile = strings.TrimSpace(cs[0])
+	nkc.KeyFile = strings.TrimSpace(cs[1])
+	return nil
+}
+
+func (*NamedCertKey) Type() string {
+	return "namedCertKey"
+}
+
+// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own
+// flag instance (in contrast to comma separated slices).
+type NamedCertKeyArray struct {
+	value   *[]NamedCertKey
+	changed bool
+}
+
+var _ flag.Value = &NamedCertKeyArray{}
+
+// NewNamedCertKeyArray creates a new NamedCertKeyArray with the internal value
+// pointing to p.
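+//
+// Illustrative use with a pflag.FlagSet fs (assumed to exist):
+//
+//	var certs []NamedCertKey
+//	fs.Var(NewNamedCertKeyArray(&certs), "tls-sni-cert-key",
+//		"A pair of x509 certificate and private key file paths, optionally followed by a colon-separated list of names")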
+func NewNamedCertKeyArray(p *[]NamedCertKey) *NamedCertKeyArray {
+	return &NamedCertKeyArray{
+		value: p,
+	}
+}
+
+func (a *NamedCertKeyArray) Set(val string) error {
+	nkc := NamedCertKey{}
+	err := nkc.Set(val)
+	if err != nil {
+		return err
+	}
+	if !a.changed {
+		*a.value = []NamedCertKey{nkc}
+		a.changed = true
+	} else {
+		*a.value = append(*a.value, nkc)
+	}
+	return nil
+}
+
+func (a *NamedCertKeyArray) Type() string {
+	return "namedCertKey"
+}
+
+func (a *NamedCertKeyArray) String() string {
+	nkcs := make([]string, 0, len(*a.value))
+	for i := range *a.value {
+		nkcs = append(nkcs, (*a.value)[i].String())
+	}
+	return "[" + strings.Join(nkcs, ";") + "]"
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/noop.go b/vendor/k8s.io/component-base/cli/flag/noop.go
new file mode 100644
index 00000000..03f7f14c
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/noop.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	goflag "flag"
+	"github.com/spf13/pflag"
+)
+
+// NoOp implements goflag.Value and pflag.Value,
+// but has a no-op Set implementation
+type NoOp struct{}
+
+var _ goflag.Value = NoOp{}
+var _ pflag.Value = NoOp{}
+
+func (NoOp) String() string {
+	return ""
+}
+
+func (NoOp) Set(val string) error {
+	return nil
+}
+
+func (NoOp) Type() string {
+	return "NoOp"
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/omitempty.go b/vendor/k8s.io/component-base/cli/flag/omitempty.go
new file mode 100644
index 00000000..c354754e
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/omitempty.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+// OmitEmpty is an interface for flags to report whether their underlying value
+// is "empty." If a flag implements OmitEmpty and returns true for a call to Empty(),
+// it is assumed that flag may be omitted from the command line.
+type OmitEmpty interface {
+	Empty() bool
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/sectioned.go b/vendor/k8s.io/component-base/cli/flag/sectioned.go
new file mode 100644
index 00000000..23574287
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/sectioned.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+)
+
+const (
+	usageFmt = "Usage:\n  %s\n"
+)
+
+// NamedFlagSets stores named flag sets in the order of calling FlagSet.
+type NamedFlagSets struct {
+	// Order is an ordered list of flag set names.
+	Order []string
+	// FlagSets stores the flag sets by name.
+	FlagSets map[string]*pflag.FlagSet
+	// NormalizeNameFunc is the normalize function which is used to initialize FlagSets created by NamedFlagSets.
+	NormalizeNameFunc func(f *pflag.FlagSet, name string) pflag.NormalizedName
+}
+
+// FlagSet returns the flag set with the given name and adds it to the
+// ordered name list if it is not in there yet.
+func (nfs *NamedFlagSets) FlagSet(name string) *pflag.FlagSet {
+	if nfs.FlagSets == nil {
+		nfs.FlagSets = map[string]*pflag.FlagSet{}
+	}
+	if _, ok := nfs.FlagSets[name]; !ok {
+		flagSet := pflag.NewFlagSet(name, pflag.ExitOnError)
+		flagSet.SetNormalizeFunc(pflag.CommandLine.GetNormalizeFunc())
+		if nfs.NormalizeNameFunc != nil {
+			flagSet.SetNormalizeFunc(nfs.NormalizeNameFunc)
+		}
+		nfs.FlagSets[name] = flagSet
+		nfs.Order = append(nfs.Order, name)
+	}
+	return nfs.FlagSets[name]
+}
+
+// PrintSections prints the given named flag sets in sections, wrapping lines at the given column count.
+// If cols is zero, lines are not wrapped.
+func PrintSections(w io.Writer, fss NamedFlagSets, cols int) {
+	for _, name := range fss.Order {
+		fs := fss.FlagSets[name]
+		if !fs.HasFlags() {
+			continue
+		}
+
+		wideFS := pflag.NewFlagSet("", pflag.ExitOnError)
+		wideFS.AddFlagSet(fs)
+
+		var zzz string
+		if cols > 24 {
+			zzz = strings.Repeat("z", cols-24)
+			wideFS.Int(zzz, 0, strings.Repeat("z", cols-24))
+		}
+
+		var buf bytes.Buffer
+		fmt.Fprintf(&buf, "\n%s flags:\n\n%s", strings.ToUpper(name[:1])+name[1:], wideFS.FlagUsagesWrapped(cols))
+
+		if cols > 24 {
+			i := strings.Index(buf.String(), zzz)
+			lines := strings.Split(buf.String()[:i], "\n")
+			fmt.Fprint(w, strings.Join(lines[:len(lines)-1], "\n"))
+			fmt.Fprintln(w)
+		} else {
+			fmt.Fprint(w, buf.String())
+		}
+	}
+}
+
+// SetUsageAndHelpFunc sets both the usage and help functions.
+// It prints only the flag sets we need instead of all of them.
+func SetUsageAndHelpFunc(cmd *cobra.Command, fss NamedFlagSets, cols int) {
+	cmd.SetUsageFunc(func(cmd *cobra.Command) error {
+		fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
+		PrintSections(cmd.OutOrStderr(), fss, cols)
+		return nil
+	})
+	cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
+		fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
+		PrintSections(cmd.OutOrStdout(), fss, cols)
+	})
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/string_flag.go b/vendor/k8s.io/component-base/cli/flag/string_flag.go
new file mode 100644
index 00000000..331bdb66
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/string_flag.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not.
+type StringFlag struct {
+	// If Set has been invoked this value is true
+	provided bool
+	// The exact value provided on the flag
+	value string
+}
+
+func NewStringFlag(defaultVal string) StringFlag {
+	return StringFlag{value: defaultVal}
+}
+
+func (f *StringFlag) Default(value string) {
+	f.value = value
+}
+
+func (f StringFlag) String() string {
+	return f.value
+}
+
+func (f StringFlag) Value() string {
+	return f.value
+}
+
+func (f *StringFlag) Set(value string) error {
+	f.value = value
+	f.provided = true
+
+	return nil
+}
+
+func (f StringFlag) Provided() bool {
+	return f.provided
+}
+
+func (f *StringFlag) Type() string {
+	return "string"
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go b/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go
new file mode 100644
index 00000000..ad0d07d7
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/string_slice_flag.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flag
+
+import (
+	goflag "flag"
+	"fmt"
+	"strings"
+
+	"github.com/spf13/pflag"
+)
+
+// StringSlice implements goflag.Value and pflag.Value,
+// and allows Set to be invoked repeatedly to accumulate values.
+type StringSlice struct {
+	value   *[]string
+	changed bool
+}
+
+func NewStringSlice(s *[]string) *StringSlice {
+	return &StringSlice{value: s}
+}
+
+var _ goflag.Value = &StringSlice{}
+var _ pflag.Value = &StringSlice{}
+
+func (s *StringSlice) String() string {
+	if s == nil || s.value == nil {
+		return ""
+	}
+	return strings.Join(*s.value, " ")
+}
+
+func (s *StringSlice) Set(val string) error {
+	if s.value == nil {
+		return fmt.Errorf("no target (nil pointer to []string)")
+	}
+	if !s.changed {
+		*s.value = make([]string, 0)
+	}
+	*s.value = append(*s.value, val)
+	s.changed = true
+	return nil
+}
+
+func (StringSlice) Type() string {
+	return "sliceString"
+}
diff --git a/vendor/k8s.io/component-base/cli/flag/tristate.go b/vendor/k8s.io/component-base/cli/flag/tristate.go
new file mode 100644
index 00000000..cf16376b
--- /dev/null
+++ b/vendor/k8s.io/component-base/cli/flag/tristate.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flag + +import ( + "fmt" + "strconv" +) + +// Tristate is a flag compatible with flags and pflags that +// keeps track of whether it had a value supplied or not. +type Tristate int + +const ( + Unset Tristate = iota // 0 + True + False +) + +func (f *Tristate) Default(value bool) { + *f = triFromBool(value) +} + +func (f Tristate) String() string { + b := boolFromTri(f) + return fmt.Sprintf("%t", b) +} + +func (f Tristate) Value() bool { + b := boolFromTri(f) + return b +} + +func (f *Tristate) Set(value string) error { + boolVal, err := strconv.ParseBool(value) + if err != nil { + return err + } + + *f = triFromBool(boolVal) + return nil +} + +func (f Tristate) Provided() bool { + if f != Unset { + return true + } + return false +} + +func (f *Tristate) Type() string { + return "tristate" +} + +func boolFromTri(t Tristate) bool { + if t == True { + return true + } else { + return false + } +} + +func triFromBool(b bool) Tristate { + if b { + return True + } else { + return False + } +} diff --git a/vendor/k8s.io/component-base/featuregate/OWNERS b/vendor/k8s.io/component-base/featuregate/OWNERS new file mode 100644 index 00000000..b2f165b6 --- /dev/null +++ b/vendor/k8s.io/component-base/featuregate/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# Currently assigned to api-approvers since feature gates are the API +# for enabling/disabling other APIs. + +# Disable inheritance as this is an api owners file +options: + no_parent_owners: true +approvers: + - api-approvers +reviewers: + - api-reviewers +labels: + - kind/api-change + - sig/api-machinery + - sig/cluster-lifecycle diff --git a/vendor/k8s.io/component-base/featuregate/feature_gate.go b/vendor/k8s.io/component-base/featuregate/feature_gate.go new file mode 100644 index 00000000..b6f08a6c --- /dev/null +++ b/vendor/k8s.io/component-base/featuregate/feature_gate.go @@ -0,0 +1,738 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package featuregate + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/spf13/pflag" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/naming" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/version" + featuremetrics "k8s.io/component-base/metrics/prometheus/feature" + baseversion "k8s.io/component-base/version" + "k8s.io/klog/v2" +) + +type Feature string + +const ( + flagName = "feature-gates" + + // allAlphaGate is a global toggle for alpha features. Per-feature key + // values override the default set by allAlphaGate. 
Examples: + // AllAlpha=false,NewFeature=true will result in newFeature=true + // AllAlpha=true,NewFeature=false will result in newFeature=false + allAlphaGate Feature = "AllAlpha" + + // allBetaGate is a global toggle for beta features. Per-feature key + // values override the default set by allBetaGate. Examples: + // AllBeta=false,NewFeature=true will result in NewFeature=true + // AllBeta=true,NewFeature=false will result in NewFeature=false + allBetaGate Feature = "AllBeta" +) + +var ( + // The generic features. + defaultFeatures = map[Feature]VersionedSpecs{ + allAlphaGate: {{Default: false, PreRelease: Alpha, Version: version.MajorMinor(0, 0)}}, + allBetaGate: {{Default: false, PreRelease: Beta, Version: version.MajorMinor(0, 0)}}, + } + + // Special handling for a few gates. + specialFeatures = map[Feature]func(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version){ + allAlphaGate: setUnsetAlphaGates, + allBetaGate: setUnsetBetaGates, + } +) + +type FeatureSpec struct { + // Default is the default enablement state for the feature + Default bool + // LockToDefault indicates that the feature is locked to its default and cannot be changed + LockToDefault bool + // PreRelease indicates the current maturity level of the feature + PreRelease prerelease + // Version indicates the earliest version from which this FeatureSpec is valid. + // If multiple FeatureSpecs exist for a Feature, the one with the highest version that is less + // than or equal to the effective version of the component is used. + Version *version.Version +} + +type VersionedSpecs []FeatureSpec + +func (g VersionedSpecs) Len() int { return len(g) } +func (g VersionedSpecs) Less(i, j int) bool { + return g[i].Version.LessThan(g[j].Version) +} +func (g VersionedSpecs) Swap(i, j int) { g[i], g[j] = g[j], g[i] } + +type PromotionVersionMapping map[prerelease]string + +type prerelease string + +const ( + PreAlpha = prerelease("PRE-ALPHA") + // Values for PreRelease. + Alpha = prerelease("ALPHA") + Beta = prerelease("BETA") + GA = prerelease("") + + // Deprecated + Deprecated = prerelease("DEPRECATED") +) + +// FeatureGate indicates whether a given feature is enabled or not +type FeatureGate interface { + // Enabled returns true if the key is enabled. + Enabled(key Feature) bool + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. + KnownFeatures() []string + // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be + // set on the copy without mutating the original. This is useful for validating + // config against potential feature gate changes before committing those changes. + DeepCopy() MutableVersionedFeatureGate + // Validate checks if the flag gates are valid at the emulated version. + Validate() []error +} + +// MutableFeatureGate parses and stores flag gates for known features from +// a string like feature1=true,feature2=false,... +type MutableFeatureGate interface { + FeatureGate + + // AddFlag adds a flag for setting global feature gates to the specified FlagSet. + AddFlag(fs *pflag.FlagSet) + // Close sets closed to true, and prevents subsequent calls to Add + Close() + // Set parses and stores flag gates for known features + // from a string like feature1=true,feature2=false,... + Set(value string) error + // SetFromMap stores flag gates for known features from a map[string]bool or returns an error + SetFromMap(m map[string]bool) error + // Add adds features to the featureGate. 
+	Add(features map[Feature]FeatureSpec) error
+	// GetAll returns a copy of the map of known feature names to feature specs.
+	GetAll() map[Feature]FeatureSpec
+	// AddMetrics adds feature enablement metrics
+	AddMetrics()
+	// OverrideDefault sets a local override for the registered default value of a named
+	// feature. If the feature has not been previously registered (e.g. by a call to Add), has a
+	// locked default, or if the gate has already registered itself with a FlagSet, a non-nil
+	// error is returned.
+	//
+	// When two or more components consume a common feature, one component can override its
+	// default at runtime in order to adopt new defaults before or after the other
+	// components. For example, a new feature can be evaluated with a limited blast radius by
+	// overriding its default to true for a limited number of components without simultaneously
+	// changing its default for all consuming components.
+	OverrideDefault(name Feature, override bool) error
+}
+
+// MutableVersionedFeatureGate parses and stores flag gates for known features from
+// a string like feature1=true,feature2=false,...
+// MutableVersionedFeatureGate sets options based on the emulated version of the feature gate.
+type MutableVersionedFeatureGate interface {
+	MutableFeatureGate
+	// EmulationVersion returns the version the feature gate is set to emulate.
+	// If set, the feature gate would enable/disable features based on
+	// feature availability and pre-release at the emulated version instead of the binary version.
+	EmulationVersion() *version.Version
+	// SetEmulationVersion overrides the emulationVersion of the feature gate.
+	// Otherwise, the emulationVersion will be the same as the binary version.
+	// If set, the feature defaults and availability will be as if the binary is at the emulated version.
+	SetEmulationVersion(emulationVersion *version.Version) error
+	// GetAllVersioned returns a copy of the map of known feature names to versioned feature specs.
+	GetAllVersioned() map[Feature]VersionedSpecs
+	// AddVersioned adds versioned feature specs to the featureGate.
+	AddVersioned(features map[Feature]VersionedSpecs) error
+	// OverrideDefaultAtVersion sets a local override for the registered default value of a named
+	// feature for the prerelease lifecycle the given version is at.
+	// If the feature has not been previously registered (e.g. by a call to Add),
+	// has a locked default, or if the gate has already registered itself with a FlagSet, a non-nil
+	// error is returned.
+	//
+	// When two or more components consume a common feature, one component can override its
+	// default at runtime in order to adopt new defaults before or after the other
+	// components. For example, a new feature can be evaluated with a limited blast radius by
+	// overriding its default to true for a limited number of components without simultaneously
+	// changing its default for all consuming components.
+	OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error
+	// ExplicitlySet returns true if the feature value is explicitly set instead of
+	// being derived from the default values or special features.
+	ExplicitlySet(name Feature) bool
+	// ResetFeatureValueToDefault resets the value of the feature back to the default value.
+	ResetFeatureValueToDefault(name Feature) error
+	// DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides,
+	// and resets all the enabled status of the new feature gate.
+	// This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate.
+	DeepCopyAndReset() MutableVersionedFeatureGate
+}
+
+// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
+type featureGate struct {
+	featureGateName string
+
+	special map[Feature]func(map[Feature]VersionedSpecs, map[Feature]bool, bool, *version.Version)
+
+	// lock guards writes to all below fields.
+	lock sync.Mutex
+	// known holds a map[Feature]FeatureSpec
+	known atomic.Value
+	// enabled holds a map[Feature]bool
+	enabled atomic.Value
+	// enabledRaw holds a raw map[string]bool of the parsed flag.
+	// It keeps the original values of "special" features like "all alpha gates",
+	// while enabled keeps the values of all resolved features.
+	enabledRaw atomic.Value
+	// closed is set to true when AddFlag is called, and prevents subsequent calls to Add
+	closed bool
+	// queriedFeatures stores all the features that have been queried through the Enabled interface.
+	// It is reset when SetEmulationVersion is called.
+	queriedFeatures  atomic.Value
+	emulationVersion atomic.Pointer[version.Version]
+}
+
+func setUnsetAlphaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) {
+	for k, v := range known {
+		if k == "AllAlpha" || k == "AllBeta" {
+			continue
+		}
+		featureSpec := featureSpecAtEmulationVersion(v, cVer)
+		if featureSpec.PreRelease == Alpha {
+			if _, found := enabled[k]; !found {
+				enabled[k] = val
+			}
+		}
+	}
+}
+
+func setUnsetBetaGates(known map[Feature]VersionedSpecs, enabled map[Feature]bool, val bool, cVer *version.Version) {
+	for k, v := range known {
+		if k == "AllAlpha" || k == "AllBeta" {
+			continue
+		}
+		featureSpec := featureSpecAtEmulationVersion(v, cVer)
+		if featureSpec.PreRelease == Beta {
+			if _, found := enabled[k]; !found {
+				enabled[k] = val
+			}
+		}
+	}
+}
+
+// Set, String, and Type implement pflag.Value
+var _ pflag.Value = &featureGate{}
+
+// internalPackages are packages that are ignored when creating a name for featureGates. These packages are in the common
+// call chains, so they'd be unhelpful as names.
+var internalPackages = []string{"k8s.io/component-base/featuregate/feature_gate.go"}
+
+// NewVersionedFeatureGate creates a feature gate with the emulation version set to the provided version.
+// SetEmulationVersion can be called afterwards to change the emulation version to a desired value.
+func NewVersionedFeatureGate(emulationVersion *version.Version) *featureGate {
+	known := map[Feature]VersionedSpecs{}
+	for k, v := range defaultFeatures {
+		known[k] = v
+	}
+
+	f := &featureGate{
+		featureGateName: naming.GetNameFromCallsite(internalPackages...),
+		special:         specialFeatures,
+	}
+	f.known.Store(known)
+	f.enabled.Store(map[Feature]bool{})
+	f.enabledRaw.Store(map[string]bool{})
+	f.emulationVersion.Store(emulationVersion)
+	f.queriedFeatures.Store(sets.Set[Feature]{})
+	klog.V(1).Infof("new feature gate with emulationVersion=%s", f.emulationVersion.Load().String())
+	return f
+}
+
+// NewFeatureGate creates a feature gate with the current binary version.
+func NewFeatureGate() *featureGate {
+	binaryVersion := version.MustParse(baseversion.DefaultKubeBinaryVersion)
+	return NewVersionedFeatureGate(binaryVersion)
+}
+
+// Set parses a string of the form "key1=value1,key2=value2,..." into a
+// map[string]bool of known keys or returns an error.
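+//
+// Editor's note (illustrative sketch, not upstream code; the gate name is
+// hypothetical): typical use of Set on a gate registered via Add:
+//
+//	fg := featuregate.NewFeatureGate()
+//	_ = fg.Add(map[featuregate.Feature]featuregate.FeatureSpec{
+//		"MyFeature": {Default: false, PreRelease: featuregate.Alpha},
+//	})
+//	if err := fg.Set("MyFeature=true,AllAlpha=false"); err != nil {
+//		klog.Fatalf("invalid --feature-gates value: %v", err)
+//	}
+//	_ = fg.Enabled("MyFeature") // true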
+func (f *featureGate) Set(value string) error { + m := make(map[string]bool) + for _, s := range strings.Split(value, ",") { + if len(s) == 0 { + continue + } + arr := strings.SplitN(s, "=", 2) + k := strings.TrimSpace(arr[0]) + if len(arr) != 2 { + return fmt.Errorf("missing bool value for %s", k) + } + v := strings.TrimSpace(arr[1]) + boolValue, err := strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("invalid value of %s=%s, err: %v", k, v, err) + } + m[k] = boolValue + } + return f.SetFromMap(m) +} + +// Validate checks if the flag gates are valid at the emulated version. +func (f *featureGate) Validate() []error { + f.lock.Lock() + defer f.lock.Unlock() + m, ok := f.enabledRaw.Load().(map[string]bool) + if !ok { + return []error{fmt.Errorf("cannot cast enabledRaw to map[string]bool")} + } + enabled := map[Feature]bool{} + return f.unsafeSetFromMap(enabled, m, f.EmulationVersion()) +} + +// unsafeSetFromMap stores flag gates for known features from a map[string]bool into an enabled map. +func (f *featureGate) unsafeSetFromMap(enabled map[Feature]bool, m map[string]bool, emulationVersion *version.Version) []error { + var errs []error + // Copy existing state + known := map[Feature]VersionedSpecs{} + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + sort.Sort(v) + known[k] = v + } + + for k, v := range m { + key := Feature(k) + versionedSpecs, ok := known[key] + if !ok { + // early return if encounters an unknown feature. + errs = append(errs, fmt.Errorf("unrecognized feature gate: %s", k)) + return errs + } + featureSpec := featureSpecAtEmulationVersion(versionedSpecs, emulationVersion) + if featureSpec.LockToDefault && featureSpec.Default != v { + errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default)) + continue + } + // Handle "special" features like "all alpha gates" + if fn, found := f.special[key]; found { + fn(known, enabled, v, emulationVersion) + enabled[key] = v + continue + } + if featureSpec.PreRelease == PreAlpha { + errs = append(errs, fmt.Errorf("cannot set feature gate %v to %v, feature is PreAlpha at emulated version %s", k, v, emulationVersion.String())) + continue + } + enabled[key] = v + + if featureSpec.PreRelease == Deprecated { + klog.Warningf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v) + } else if featureSpec.PreRelease == GA { + klog.Warningf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v) + } + } + return errs +} + +// SetFromMap stores flag gates for known features from a map[string]bool or returns an error +func (f *featureGate) SetFromMap(m map[string]bool) error { + f.lock.Lock() + defer f.lock.Unlock() + + // Copy existing state + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + + // Update enabledRaw first. + // SetFromMap might be called when emulationVersion is not finalized yet, and we do not know the final state of enabled. + // But the flags still need to be saved. 
+ for k, v := range m { + enabledRaw[k] = v + } + f.enabledRaw.Store(enabledRaw) + + errs := f.unsafeSetFromMap(enabled, enabledRaw, f.EmulationVersion()) + if len(errs) == 0 { + // Persist changes + f.enabled.Store(enabled) + klog.V(1).Infof("feature gates: %v", f.enabled) + } + return utilerrors.NewAggregate(errs) +} + +// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...". +func (f *featureGate) String() string { + pairs := []string{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + pairs = append(pairs, fmt.Sprintf("%s=%t", k, v)) + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +func (f *featureGate) Type() string { + return "mapStringBool" +} + +// Add adds features to the featureGate. +func (f *featureGate) Add(features map[Feature]FeatureSpec) error { + vs := map[Feature]VersionedSpecs{} + for name, spec := range features { + // if no version is provided for the FeatureSpec, it is defaulted to version 0.0 so that it can be enabled/disabled regardless of emulation version. + spec.Version = version.MajorMinor(0, 0) + vs[name] = VersionedSpecs{spec} + } + return f.AddVersioned(vs) +} + +// AddVersioned adds versioned feature specs to the featureGate. +func (f *featureGate) AddVersioned(features map[Feature]VersionedSpecs) error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return fmt.Errorf("cannot add a feature gate after adding it to the flag set") + } + + // Copy existing state + known := f.GetAllVersioned() + + for name, specs := range features { + sort.Sort(specs) + if existingSpec, found := known[name]; found { + sort.Sort(existingSpec) + if reflect.DeepEqual(existingSpec, specs) { + continue + } + return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec) + } + known[name] = specs + } + + // Persist updated state + f.known.Store(known) + + return nil +} + +func (f *featureGate) OverrideDefault(name Feature, override bool) error { + return f.OverrideDefaultAtVersion(name, override, f.EmulationVersion()) +} + +func (f *featureGate) OverrideDefaultAtVersion(name Feature, override bool, ver *version.Version) error { + f.lock.Lock() + defer f.lock.Unlock() + + if f.closed { + return fmt.Errorf("cannot override default for feature %q: gates already added to a flag set", name) + } + + // Copy existing state + known := f.GetAllVersioned() + + specs, ok := known[name] + if !ok { + return fmt.Errorf("cannot override default: feature %q is not registered", name) + } + spec := featureSpecAtEmulationVersion(specs, ver) + switch { + case spec.LockToDefault: + return fmt.Errorf("cannot override default: feature %q default is locked to %t", name, spec.Default) + case spec.PreRelease == PreAlpha: + return fmt.Errorf("cannot override default: feature %q is not available before version %s", name, ver.String()) + case spec.PreRelease == Deprecated: + klog.Warningf("Overriding default of deprecated feature gate %s=%t. It will be removed in a future release.", name, override) + case spec.PreRelease == GA: + klog.Warningf("Overriding default of GA feature gate %s=%t. It will be removed in a future release.", name, override) + } + + spec.Default = override + known[name] = specs + f.known.Store(known) + + return nil +} + +// GetAll returns a copy of the map of known feature names to feature specs for the current emulationVersion. 
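+//
+// Editor's note (illustrative sketch, not upstream code; names and versions
+// are hypothetical): given a gate populated via AddVersioned,
+//
+//	_ = fg.AddVersioned(map[featuregate.Feature]featuregate.VersionedSpecs{
+//		"MyFeature": {
+//			{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
+//			{Version: version.MustParse("1.30"), Default: true, PreRelease: featuregate.Beta},
+//		},
+//	})
+//
+// GetAll resolves each feature to the single spec in effect at the current
+// emulation version, e.g. the Beta spec when emulating 1.30 or later.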
+func (f *featureGate) GetAll() map[Feature]FeatureSpec { + retval := map[Feature]FeatureSpec{} + f.lock.Lock() + versionedSpecs := f.GetAllVersioned() + emuVer := f.EmulationVersion() + f.lock.Unlock() + for k, v := range versionedSpecs { + spec := featureSpecAtEmulationVersion(v, emuVer) + if spec.PreRelease == PreAlpha { + // The feature is not available at the emulation version. + continue + } + retval[k] = *spec + } + return retval +} + +// GetAllVersioned returns a copy of the map of known feature names to versioned feature specs. +func (f *featureGate) GetAllVersioned() map[Feature]VersionedSpecs { + retval := map[Feature]VersionedSpecs{} + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + vCopy := make([]FeatureSpec, len(v)) + _ = copy(vCopy, v) + retval[k] = vCopy + } + return retval +} + +func (f *featureGate) SetEmulationVersion(emulationVersion *version.Version) error { + if emulationVersion.EqualTo(f.EmulationVersion()) { + return nil + } + f.lock.Lock() + defer f.lock.Unlock() + klog.V(1).Infof("set feature gate emulationVersion to %s", emulationVersion.String()) + + // Copy existing state + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + // enabled map should be reset whenever emulationVersion is changed. + enabled := map[Feature]bool{} + errs := f.unsafeSetFromMap(enabled, enabledRaw, emulationVersion) + + queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature]) + known := f.known.Load().(map[Feature]VersionedSpecs) + for feature := range queriedFeatures { + newVal := featureEnabled(feature, enabled, known, emulationVersion) + oldVal := featureEnabled(feature, f.enabled.Load().(map[Feature]bool), known, f.EmulationVersion()) + if newVal != oldVal { + klog.Warningf("SetEmulationVersion will change already queried feature:%s from %v to %v", feature, oldVal, newVal) + } + } + + if len(errs) == 0 { + // Persist changes + f.enabled.Store(enabled) + f.emulationVersion.Store(emulationVersion) + f.queriedFeatures.Store(sets.Set[Feature]{}) + } + return utilerrors.NewAggregate(errs) +} + +func (f *featureGate) EmulationVersion() *version.Version { + return f.emulationVersion.Load() +} + +// featureSpec returns the featureSpec at the EmulationVersion if the key exists, an error otherwise. +// This is useful to keep multiple implementations of a feature based on the PreRelease or Version info. 
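+//
+// Editor's note (illustrative sketch, not upstream code): continuing the
+// hypothetical MyFeature example, resolution follows the emulation version:
+//
+//	_ = fg.SetEmulationVersion(version.MustParse("1.29"))
+//	// featureSpec("MyFeature") now yields the 1.28 Alpha spec, so Enabled
+//	// reports the Alpha default (false) unless explicitly overridden.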
+func (f *featureGate) featureSpec(key Feature) (FeatureSpec, error) { + if v, ok := f.known.Load().(map[Feature]VersionedSpecs)[key]; ok { + featureSpec := f.featureSpecAtEmulationVersion(v) + return *featureSpec, nil + } + return FeatureSpec{}, fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName) +} + +func (f *featureGate) unsafeRecordQueried(key Feature) { + queriedFeatures := f.queriedFeatures.Load().(sets.Set[Feature]) + if _, ok := queriedFeatures[key]; ok { + return + } + // Clone items from queriedFeatures before mutating it + newQueriedFeatures := queriedFeatures.Clone() + newQueriedFeatures.Insert(key) + f.queriedFeatures.Store(newQueriedFeatures) +} + +func featureEnabled(key Feature, enabled map[Feature]bool, known map[Feature]VersionedSpecs, emulationVersion *version.Version) bool { + // check explicitly set enabled list + if v, ok := enabled[key]; ok { + return v + } + if v, ok := known[key]; ok { + return featureSpecAtEmulationVersion(v, emulationVersion).Default + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGate", key)) +} + +// Enabled returns true if the key is enabled. If the key is not known, this call will panic. +func (f *featureGate) Enabled(key Feature) bool { + // TODO: ideally we should lock the feature gate in this call to be safe, need to evaluate how much performance impact locking would have. + v := featureEnabled(key, f.enabled.Load().(map[Feature]bool), f.known.Load().(map[Feature]VersionedSpecs), f.EmulationVersion()) + f.unsafeRecordQueried(key) + return v +} + +func (f *featureGate) featureSpecAtEmulationVersion(v VersionedSpecs) *FeatureSpec { + return featureSpecAtEmulationVersion(v, f.EmulationVersion()) +} + +func featureSpecAtEmulationVersion(v VersionedSpecs, emulationVersion *version.Version) *FeatureSpec { + i := len(v) - 1 + for ; i >= 0; i-- { + if v[i].Version.GreaterThan(emulationVersion) { + continue + } + return &v[i] + } + return &FeatureSpec{ + Default: false, + PreRelease: PreAlpha, + Version: version.MajorMinor(0, 0), + } +} + +// Close sets closed to true, and prevents subsequent calls to Add +func (f *featureGate) Close() { + f.lock.Lock() + f.closed = true + f.lock.Unlock() +} + +// AddFlag adds a flag for setting global feature gates to the specified FlagSet. +func (f *featureGate) AddFlag(fs *pflag.FlagSet) { + // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead? + // Not all components expose a feature gates flag using this AddFlag method, and + // in the future, all components will completely stop exposing a feature gates flag, + // in favor of componentconfig. + f.Close() + + known := f.KnownFeatures() + fs.Var(f, flagName, ""+ + "A set of key=value pairs that describe feature gates for alpha/experimental features. "+ + "Options are:\n"+strings.Join(known, "\n")) +} + +func (f *featureGate) AddMetrics() { + for feature, featureSpec := range f.GetAll() { + featuremetrics.RecordFeatureInfo(context.Background(), string(feature), string(featureSpec.PreRelease), f.Enabled(feature)) + } +} + +// KnownFeatures returns a slice of strings describing the FeatureGate's known features. +// preAlpha, Deprecated and GA features are hidden from the list. 
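+//
+// Editor's note (illustrative sketch, not upstream code): KnownFeatures backs
+// the help text of the --feature-gates flag wired up via AddFlag:
+//
+//	fs := pflag.NewFlagSet("component", pflag.ExitOnError)
+//	fg.AddFlag(fs)
+//	_ = fs.Parse([]string{"--feature-gates=MyFeature=true"})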
+func (f *featureGate) KnownFeatures() []string { + var known []string + for k, v := range f.known.Load().(map[Feature]VersionedSpecs) { + if k == "AllAlpha" || k == "AllBeta" { + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v[0].PreRelease, v[0].Default)) + continue + } + featureSpec := f.featureSpecAtEmulationVersion(v) + if featureSpec.PreRelease == GA || featureSpec.PreRelease == Deprecated || featureSpec.PreRelease == PreAlpha { + continue + } + known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, featureSpec.PreRelease, featureSpec.Default)) + } + sort.Strings(known) + return known +} + +// DeepCopyAndReset copies all the registered features of the FeatureGate object, with all the known features and overrides, +// and resets all the enabled status of the new feature gate. +// This is useful for creating a new instance of feature gate without inheriting all the enabled configurations of the base feature gate. +func (f *featureGate) DeepCopyAndReset() MutableVersionedFeatureGate { + fg := NewVersionedFeatureGate(f.EmulationVersion()) + known := f.GetAllVersioned() + fg.known.Store(known) + return fg +} + +// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be +// set on the copy without mutating the original. This is useful for validating +// config against potential feature gate changes before committing those changes. +func (f *featureGate) DeepCopy() MutableVersionedFeatureGate { + f.lock.Lock() + defer f.lock.Unlock() + // Copy existing state. + known := f.GetAllVersioned() + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + + // Construct a new featureGate around the copied state. + // Note that specialFeatures is treated as immutable by convention, + // and we maintain the value of f.closed across the copy. + fg := &featureGate{ + special: specialFeatures, + closed: f.closed, + } + fg.emulationVersion.Store(f.EmulationVersion()) + fg.known.Store(known) + fg.enabled.Store(enabled) + fg.enabledRaw.Store(enabledRaw) + fg.queriedFeatures.Store(sets.Set[Feature]{}) + return fg +} + +// ExplicitlySet returns true if the feature value is explicitly set instead of +// being derived from the default values or special features. +func (f *featureGate) ExplicitlySet(name Feature) bool { + enabledRaw := f.enabledRaw.Load().(map[string]bool) + _, ok := enabledRaw[string(name)] + return ok +} + +// ResetFeatureValueToDefault resets the value of the feature back to the default value. +func (f *featureGate) ResetFeatureValueToDefault(name Feature) error { + f.lock.Lock() + defer f.lock.Unlock() + enabled := map[Feature]bool{} + for k, v := range f.enabled.Load().(map[Feature]bool) { + enabled[k] = v + } + enabledRaw := map[string]bool{} + for k, v := range f.enabledRaw.Load().(map[string]bool) { + enabledRaw[k] = v + } + _, inEnabled := enabled[name] + if inEnabled { + delete(enabled, name) + } + _, inEnabledRaw := enabledRaw[string(name)] + if inEnabledRaw { + delete(enabledRaw, string(name)) + } + // some features could be in enabled map but not enabledRaw map, + // for example some Alpha feature when AllAlpha is set. 
+	if inEnabledRaw && !inEnabled {
+		return fmt.Errorf("feature:%s was explicitly set, but not in enabled map", name)
+	}
+	f.enabled.Store(enabled)
+	f.enabledRaw.Store(enabledRaw)
+	return nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/OWNERS b/vendor/k8s.io/component-base/metrics/OWNERS
new file mode 100644
index 00000000..be371a4a
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - sig-instrumentation-approvers
+  - logicalhan
+  - RainbowMango
+reviewers:
+  - sig-instrumentation-reviewers
+  - YoyinZyc
+labels:
+  - sig/instrumentation
diff --git a/vendor/k8s.io/component-base/metrics/buckets.go b/vendor/k8s.io/component-base/metrics/buckets.go
new file mode 100644
index 00000000..27a57eb7
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/buckets.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// DefBuckets is a wrapper for prometheus.DefBuckets
+var DefBuckets = prometheus.DefBuckets
+
+// LinearBuckets is a wrapper for prometheus.LinearBuckets.
+func LinearBuckets(start, width float64, count int) []float64 {
+	return prometheus.LinearBuckets(start, width, count)
+}
+
+// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	return prometheus.ExponentialBuckets(start, factor, count)
+}
+
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+	return prometheus.ExponentialBucketsRange(min, max, count)
+}
+
+// MergeBuckets merges buckets together.
+func MergeBuckets(buckets ...[]float64) []float64 {
+	// Start from an empty slice; a non-zero initial length would prepend a spurious 0 bucket.
+	result := make([]float64, 0)
+	for _, s := range buckets {
+		result = append(result, s...)
+	}
+	return result
+}
diff --git a/vendor/k8s.io/component-base/metrics/collector.go b/vendor/k8s.io/component-base/metrics/collector.go
new file mode 100644
index 00000000..0718b6e1
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/collector.go
@@ -0,0 +1,190 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// StableCollector extends the prometheus.Collector interface to allow customization of the
+// metric registration process; it is especially intended to be used for custom collectors.
+type StableCollector interface {
+	prometheus.Collector
+
+	// DescribeWithStability sends the super-set of all possible metrics.Desc collected
+	// by this StableCollector to the provided channel.
+	DescribeWithStability(chan<- *Desc)
+
+	// CollectWithStability sends each collected metrics.Metric via the provided channel.
+	CollectWithStability(chan<- Metric)
+
+	// Create initializes all Descs and is intended to be called by the registry.
+	Create(version *semver.Version, self StableCollector) bool
+
+	// ClearState will clear all the states marked by Create.
+	ClearState()
+
+	// HiddenMetrics returns the list of hidden metric fqNames.
+	HiddenMetrics() []string
+}
+
+// BaseStableCollector, which implements almost all methods defined by StableCollector,
+// is a convenient assistant for custom collectors.
+// It is recommended to embed BaseStableCollector when implementing custom collectors.
+type BaseStableCollector struct {
+	descriptors  map[string]*Desc // stores all descriptors keyed by fqName; these are collected from DescribeWithStability().
+	registerable map[string]*Desc // stores registerable descriptors keyed by fqName; a subset of descriptors.
+	hidden       map[string]*Desc // stores hidden descriptors keyed by fqName; a subset of descriptors.
+	self         StableCollector
+}
+
+// DescribeWithStability sends all descriptors to the provided channel.
+// Every custom collector should override this method.
+func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) {
+	panic(fmt.Errorf("custom collectors must override the DescribeWithStability method"))
+}
+
+// Describe sends all descriptors to the provided channel.
+// It is intended to be called by the Prometheus registry.
+func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) {
+	for _, d := range bsc.registerable {
+		ch <- d.toPrometheusDesc()
+	}
+}
+
+// CollectWithStability sends all metrics to the provided channel.
+// Every custom collector should override this method.
+func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) {
+	panic(fmt.Errorf("custom collectors must override the CollectWithStability method"))
+}
+
+// Collect is called by the Prometheus registry when collecting metrics.
+func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) {
+	mch := make(chan Metric)
+
+	go func() {
+		bsc.self.CollectWithStability(mch)
+		close(mch)
+	}()
+
+	for m := range mch {
+		// a nil Metric usually means a hidden metric
+		if m == nil {
+			continue
+		}
+
+		ch <- prometheus.Metric(m)
+	}
+}
+
+func (bsc *BaseStableCollector) add(d *Desc) {
+	if len(d.fqName) == 0 {
+		panic("nameless metrics are not allowed")
+	}
+
+	if bsc.descriptors == nil {
+		bsc.descriptors = make(map[string]*Desc)
+	}
+
+	if _, exist := bsc.descriptors[d.fqName]; exist {
+		panic(fmt.Sprintf("duplicate metrics (%s) are not allowed", d.fqName))
+	}
+
+	bsc.descriptors[d.fqName] = d
+}
+
+// init is intended to be called by the registry.
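+//
+// Editor's note (illustrative sketch, not upstream code; names are
+// hypothetical): a minimal custom collector embedding BaseStableCollector:
+//
+//	type storeCollector struct {
+//		metrics.BaseStableCollector
+//		count *metrics.Desc
+//	}
+//
+//	func (c *storeCollector) DescribeWithStability(ch chan<- *metrics.Desc) {
+//		ch <- c.count
+//	}
+//
+//	func (c *storeCollector) CollectWithStability(ch chan<- metrics.Metric) {
+//		ch <- metrics.NewLazyConstMetric(c.count, metrics.GaugeValue, 42)
+//	}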
+func (bsc *BaseStableCollector) init(self StableCollector) {
+	bsc.self = self
+
+	dch := make(chan *Desc)
+
+	// collect all possible descriptors from the custom side
+	go func() {
+		bsc.self.DescribeWithStability(dch)
+		close(dch)
+	}()
+
+	for d := range dch {
+		bsc.add(d)
+	}
+}
+
+func (bsc *BaseStableCollector) trackRegistrableDescriptor(d *Desc) {
+	if bsc.registerable == nil {
+		bsc.registerable = make(map[string]*Desc)
+	}
+
+	bsc.registerable[d.fqName] = d
+}
+
+func (bsc *BaseStableCollector) trackHiddenDescriptor(d *Desc) {
+	if bsc.hidden == nil {
+		bsc.hidden = make(map[string]*Desc)
+	}
+
+	bsc.hidden[d.fqName] = d
+}
+
+// Create is intended to be called by the registry.
+// Create returns true as long as one or more metrics are not hidden.
+// Otherwise it returns false, meaning the whole collector will be ignored by the registry.
+func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool {
+	bsc.init(self)
+
+	for _, d := range bsc.descriptors {
+		d.create(version)
+		if d.IsHidden() {
+			bsc.trackHiddenDescriptor(d)
+		} else {
+			bsc.trackRegistrableDescriptor(d)
+		}
+	}
+
+	if len(bsc.registerable) > 0 {
+		return true
+	}
+
+	return false
+}
+
+// ClearState will clear all the states marked by Create.
+// It is intended to be used when re-registering a hidden metric.
+func (bsc *BaseStableCollector) ClearState() {
+	for _, d := range bsc.descriptors {
+		d.ClearState()
+	}
+
+	bsc.descriptors = nil
+	bsc.registerable = nil
+	bsc.hidden = nil
+	bsc.self = nil
+}
+
+// HiddenMetrics returns the list of hidden metric fqNames.
+func (bsc *BaseStableCollector) HiddenMetrics() (fqNames []string) {
+	for i := range bsc.hidden {
+		fqNames = append(fqNames, bsc.hidden[i].fqName)
+	}
+	return
+}
+
+// Check that BaseStableCollector implements the necessary interface.
+var _ StableCollector = &BaseStableCollector{}
diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go
new file mode 100644
index 00000000..5664a68a
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/counter.go
@@ -0,0 +1,242 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Counter is our internal representation for our wrapping struct around prometheus
+// counters. Counter implements both kubeCollector and CounterMetric.
+type Counter struct {
+	CounterMetric
+	*CounterOpts
+	lazyMetric
+	selfCollector
+}
+
+// The implementation of the Metric interface is expected by testutil.GetCounterMetricValue.
+var _ Metric = &Counter{}
+
+// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated.
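+//
+// Editor's note (illustrative sketch, not upstream code; names are
+// hypothetical): typical usage, registering through legacyregistry:
+//
+//	requests := metrics.NewCounter(&metrics.CounterOpts{
+//		Namespace:      "myapp",
+//		Name:           "requests_total",
+//		Help:           "Total number of requests handled.",
+//		StabilityLevel: metrics.ALPHA,
+//	})
+//	legacyregistry.MustRegister(requests) // instantiates the underlying counter
+//	requests.Inc()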
+func NewCounter(opts *CounterOpts) *Counter { + opts.StabilityLevel.setDefaults() + + kc := &Counter{ + CounterOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + kc.setPrometheusCounter(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +func (c *Counter) Desc() *prometheus.Desc { + return c.metric.Desc() +} + +func (c *Counter) Write(to *dto.Metric) error { + return c.metric.Write(to) +} + +// Reset resets the underlying prometheus Counter to start counting from 0 again +func (c *Counter) Reset() { + if !c.IsCreated() { + return + } + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement. +func (c *Counter) setPrometheusCounter(counter prometheus.Counter) { + c.CounterMetric = counter + c.initSelfCollection(counter) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (c *Counter) DeprecatedVersion() *semver.Version { + return parseSemver(c.CounterOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Counter. Until this method is called +// the underlying counter is a no-op. +func (c *Counter) initializeMetric() { + c.CounterOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus counter. + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method +// is called the underlying counter is a no-op. +func (c *Counter) initializeDeprecatedMetric() { + c.CounterOpts.markDeprecated() + c.initializeMetric() +} + +// WithContext allows the normal Counter metric to pass in context. The context is no-op now. +func (c *Counter) WithContext(ctx context.Context) CounterMetric { + return c.CounterMetric +} + +// CounterVec is the internal representation of our wrapping struct around prometheus +// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. +type CounterVec struct { + *prometheus.CounterVec + *CounterOpts + lazyMetric + originalLabels []string +} + +var _ kubeCollector = &CounterVec{} + +// TODO: make this true: var _ CounterVecMetric = &CounterVec{} + +// NewCounterVec returns an object which satisfies the kubeCollector and (almost) CounterVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated, and only members extracted after +// registration will actually measure anything. +func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &CounterVec{ + CounterVec: noopCounterVec, + CounterOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *CounterVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.CounterOpts.DeprecatedVersion) + +} + +// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called +// the underlying counterVec is a no-op. 
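+//
+// Editor's note (illustrative sketch, not upstream code; names are
+// hypothetical): a registered vector lazily creates one child per label value:
+//
+//	codes := metrics.NewCounterVec(&metrics.CounterOpts{
+//		Name: "responses_total",
+//		Help: "Responses partitioned by HTTP status code.",
+//	}, []string{"code"})
+//	legacyregistry.MustRegister(codes)
+//	codes.WithLabelValues("200").Inc()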
+func (v *CounterVec) initializeMetric() { + v.CounterOpts.annotateStabilityLevel() + v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeDeprecatedMetric() { + v.CounterOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created IFF the counterVec +// has been registered to a metrics registry. +func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.CounterVec.WithLabelValues(lvs...) +} + +// With returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created IFF the counterVec has +// been registered to a metrics registry. +func (v *CounterVec) With(labels map[string]string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.CounterVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *CounterVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.CounterVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *CounterVec) Reset() { + if !v.IsCreated() { + return + } + + v.CounterVec.Reset() +} + +// WithContext returns wrapped CounterVec with context +func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext { + return &CounterVecWithContext{ + ctx: ctx, + CounterVec: v, + } +} + +// CounterVecWithContext is the wrapper of CounterVec with context. +type CounterVecWithContext struct { + *CounterVec + ctx context.Context +} + +// WithLabelValues is the wrapper of CounterVec.WithLabelValues. 
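+//
+// Editor's note (illustrative sketch, not upstream code): the context is
+// currently a no-op passthrough, so this behaves like the plain vector:
+//
+//	cvc := codes.WithContext(ctx) // ctx is a hypothetical context.Context
+//	cvc.WithLabelValues("200").Inc()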
+func (vc *CounterVecWithContext) WithLabelValues(lvs ...string) CounterMetric {
+	return vc.CounterVec.WithLabelValues(lvs...)
+}
+
+// With is the wrapper of CounterVec.With.
+func (vc *CounterVecWithContext) With(labels map[string]string) CounterMetric {
+	return vc.CounterVec.With(labels)
+}
diff --git a/vendor/k8s.io/component-base/metrics/desc.go b/vendor/k8s.io/component-base/metrics/desc.go
new file mode 100644
index 00000000..2ca9cfa7
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/desc.go
@@ -0,0 +1,225 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"k8s.io/klog/v2"
+)
+
+// Desc is a prometheus.Desc extension.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+	// fqName has been built from Namespace, Subsystem, and Name.
+	fqName string
+	// help provides some helpful information about this metric.
+	help string
+	// constLabels contains the constant label name/value pairs attached to this metric.
+	constLabels Labels
+	// variableLabels contains names of labels for which the metric
+	// maintains variable values.
+	variableLabels []string
+
+	// promDesc is the descriptor used by every Prometheus Metric.
+	promDesc      *prometheus.Desc
+	annotatedHelp string
+
+	// stabilityLevel represents the API guarantees for a given defined metric.
+	stabilityLevel StabilityLevel
+	// deprecatedVersion represents the version in which this metric will be deprecated.
+	deprecatedVersion string
+
+	isDeprecated        bool
+	isHidden            bool
+	isCreated           bool
+	createLock          sync.RWMutex
+	markDeprecationOnce sync.Once
+	createOnce          sync.Once
+	deprecateOnce       sync.Once
+	hideOnce            sync.Once
+	annotateOnce        sync.Once
+}
+
+// NewDesc extends prometheus.NewDesc with stability support.
+//
+// The stabilityLevel should be a valid stability label, such as "metrics.ALPHA"
+// or "metrics.STABLE" (maybe "metrics.BETA" in the future). The default value
+// "metrics.ALPHA" is used for an empty or invalid stability label.
+//
+// The deprecatedVersion represents the version in which this Metric will be deprecated,
+// following the deprecation policy outlined by the control plane metrics stability KEP.
+func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels,
+	stabilityLevel StabilityLevel, deprecatedVersion string) *Desc {
+	d := &Desc{
+		fqName:            fqName,
+		help:              help,
+		annotatedHelp:     help,
+		variableLabels:    variableLabels,
+		constLabels:       constLabels,
+		stabilityLevel:    stabilityLevel,
+		deprecatedVersion: deprecatedVersion,
+	}
+	d.stabilityLevel.setDefaults()
+
+	return d
+}
+
+// String formats the Desc as a string.
+// The stability metadata may be annotated in the 'HELP' section if called after registration,
+// otherwise not.
+// e.g.
+// "Desc{fqName: "normal_stable_descriptor", help: "[STABLE] this is a stable descriptor", constLabels: {}, variableLabels: []}"
+func (d *Desc) String() string {
+	if d.isCreated {
+		return d.promDesc.String()
+	}
+
+	return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String()
+}
+
+// toPrometheusDesc transforms the Desc into a prometheus.Desc.
+func (d *Desc) toPrometheusDesc() *prometheus.Desc {
+	return d.promDesc
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (d *Desc) DeprecatedVersion() *semver.Version {
+	return parseSemver(d.deprecatedVersion)
+}
+
+func (d *Desc) determineDeprecationStatus(version semver.Version) {
+	selfVersion := d.DeprecatedVersion()
+	if selfVersion == nil {
+		return
+	}
+	d.markDeprecationOnce.Do(func() {
+		if selfVersion.LTE(version) {
+			d.isDeprecated = true
+		}
+		if ShouldShowHidden() {
+			klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", d.fqName)
+			return
+		}
+		if shouldHide(&version, selfVersion) {
+			// TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369
+			// klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName)
+			d.isHidden = true
+		}
+	})
+}
+
+// IsHidden returns whether the metric will be hidden.
+func (d *Desc) IsHidden() bool {
+	return d.isHidden
+}
+
+// IsDeprecated returns whether the metric has been deprecated.
+func (d *Desc) IsDeprecated() bool {
+	return d.isDeprecated
+}
+
+// IsCreated returns whether the metric has been created.
+func (d *Desc) IsCreated() bool {
+	d.createLock.RLock()
+	defer d.createLock.RUnlock()
+
+	return d.isCreated
+}
+
+// create forces the initialization of Desc which has been deferred until
+// the point at which this method is invoked. This method will determine whether
+// the Desc is deprecated or hidden, no-opting if the Desc should be considered
+// hidden. Furthermore, this function no-opts and returns true if the Desc is
+// already created.
+func (d *Desc) create(version *semver.Version) bool {
+	if version != nil {
+		d.determineDeprecationStatus(*version)
+	}
+
+	// let's not create if this metric is slated to be hidden
+	if d.IsHidden() {
+		return false
+	}
+	d.createOnce.Do(func() {
+		d.createLock.Lock()
+		defer d.createLock.Unlock()
+
+		d.isCreated = true
+		if d.IsDeprecated() {
+			d.initializeDeprecatedDesc()
+		} else {
+			d.initialize()
+		}
+	})
+	return d.IsCreated()
+}
+
+// ClearState will clear all the states marked by Create.
+// It is intended to be used when re-registering a hidden metric.
+func (d *Desc) ClearState() {
+	d.isDeprecated = false
+	d.isHidden = false
+	d.isCreated = false
+
+	d.markDeprecationOnce = *new(sync.Once)
+	d.createOnce = *new(sync.Once)
+	d.deprecateOnce = *new(sync.Once)
+	d.hideOnce = *new(sync.Once)
+	d.annotateOnce = *new(sync.Once)
+
+	d.annotatedHelp = d.help
+	d.promDesc = nil
+}
+
+func (d *Desc) markDeprecated() {
+	d.deprecateOnce.Do(func() {
+		d.annotatedHelp = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.annotatedHelp)
+	})
+}
+
+func (d *Desc) annotateStabilityLevel() {
+	d.annotateOnce.Do(func() {
+		d.annotatedHelp = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.annotatedHelp)
+	})
+}
+
+func (d *Desc) initialize() {
+	d.annotateStabilityLevel()
+
+	// this actually creates the underlying prometheus desc.
+	d.promDesc = prometheus.NewDesc(d.fqName, d.annotatedHelp, d.variableLabels, prometheus.Labels(d.constLabels))
+}
+
+func (d *Desc) initializeDeprecatedDesc() {
+	d.markDeprecated()
+	d.initialize()
+}
+
+// GetRawDesc returns a new *Desc with the original parameters provided to NewDesc().
+//
+// It is useful in testing scenarios where the same Desc is registered to different registries.
+// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` may be created)
+// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has already been created once, it will be ignored by registry 'B')
+func (d *Desc) GetRawDesc() *Desc {
+	return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion)
+}
diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go
new file mode 100644
index 00000000..89631115
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/gauge.go
@@ -0,0 +1,277 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+
+	"k8s.io/component-base/version"
+)
+
+// Gauge is our internal representation for our wrapping struct around prometheus
+// gauges. Gauge implements both kubeCollector and GaugeMetric.
+type Gauge struct {
+	GaugeMetric
+	*GaugeOpts
+	lazyMetric
+	selfCollector
+}
+
+var _ GaugeMetric = &Gauge{}
+var _ Registerable = &Gauge{}
+var _ kubeCollector = &Gauge{}
+
+// NewGauge returns an object which satisfies the kubeCollector, Registerable, and Gauge interfaces.
+// However, the object returned will not measure anything unless the collector is first
+// registered, since the metric is lazily instantiated.
+func NewGauge(opts *GaugeOpts) *Gauge {
+	opts.StabilityLevel.setDefaults()
+
+	kc := &Gauge{
+		GaugeOpts:  opts,
+		lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	kc.setPrometheusGauge(noop)
+	kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return kc
+}
+
+// setPrometheusGauge sets the underlying GaugeMetric object, i.e. the thing that does the measurement.
+func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) {
+	g.GaugeMetric = gauge
+	g.initSelfCollection(gauge)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (g *Gauge) DeprecatedVersion() *semver.Version {
+	return parseSemver(g.GaugeOpts.DeprecatedVersion)
+}
+
+// initializeMetric invocation creates the actual underlying Gauge. Until this method is called
+// the underlying gauge is a no-op.
+func (g *Gauge) initializeMetric() {
+	g.GaugeOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus gauge.
+	g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts()))
+}
+
+// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method
+// is called the underlying gauge is a no-op.
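+//
+// Editor's note (illustrative sketch, not upstream code; names are
+// hypothetical): gauges follow the same lazy pattern as counters:
+//
+//	depth := metrics.NewGauge(&metrics.GaugeOpts{
+//		Name: "queue_depth",
+//		Help: "Current depth of the work queue.",
+//	})
+//	legacyregistry.MustRegister(depth)
+//	depth.Set(42)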
+func (g *Gauge) initializeDeprecatedMetric() { + g.GaugeOpts.markDeprecated() + g.initializeMetric() +} + +// WithContext allows the normal Gauge metric to pass in context. The context is no-op now. +func (g *Gauge) WithContext(ctx context.Context) GaugeMetric { + return g.GaugeMetric +} + +// GaugeVec is the internal representation of our wrapping struct around prometheus +// gaugeVecs. kubeGaugeVec implements both kubeCollector and KubeGaugeVec. +type GaugeVec struct { + *prometheus.GaugeVec + *GaugeOpts + lazyMetric + originalLabels []string +} + +var _ GaugeVecMetric = &GaugeVec{} +var _ Registerable = &GaugeVec{} +var _ kubeCollector = &GaugeVec{} + +// NewGaugeVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated, and only members extracted after +// registration will actually measure anything. +func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &GaugeVec{ + GaugeVec: noopGaugeVec, + GaugeOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *GaugeVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeMetric() { + v.GaugeOpts.annotateStabilityLevel() + v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeDeprecatedMetric() { + v.GaugeOpts.markDeprecated() + v.initializeMetric() +} + +func (v *GaugeVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + elt, err := v.GaugeVec.GetMetricWithLabelValues(lvs...) + return elt, err +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the GaugeMetric for the given slice of label +// values (same order as the VariableLabels in Desc). 
If that combination of +// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec +// has been registered to a metrics registry. +func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { + ans, err := v.WithLabelValuesChecked(lvs...) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +func (v *GaugeVec) WithChecked(labels map[string]string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + elt, err := v.GaugeVec.GetMetricWith(labels) + return elt, err +} + +// With returns the GaugeMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has +// been registered to a metrics registry. +func (v *GaugeVec) With(labels map[string]string) GaugeMetric { + ans, err := v.WithChecked(labels) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *GaugeVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.GaugeVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *GaugeVec) Reset() { + if !v.IsCreated() { + return + } + + v.GaugeVec.Reset() +} + +func newGaugeFunc(opts *GaugeOpts, function func() float64, v semver.Version) GaugeFunc { + g := NewGauge(opts) + + if !g.Create(&v) { + return nil + } + + return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function) +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts *GaugeOpts, function func() float64) GaugeFunc { + v := parseVersion(version.Get()) + + return newGaugeFunc(opts, function, v) +} + +// WithContext returns wrapped GaugeVec with context +func (v *GaugeVec) WithContext(ctx context.Context) *GaugeVecWithContext { + return &GaugeVecWithContext{ + ctx: ctx, + GaugeVec: v, + } +} + +func (v *GaugeVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric { + return v.WithContext(ctx) +} + +// GaugeVecWithContext is the wrapper of GaugeVec with context. +type GaugeVecWithContext struct { + *GaugeVec + ctx context.Context +} + +// WithLabelValues is the wrapper of GaugeVec.WithLabelValues. +func (vc *GaugeVecWithContext) WithLabelValues(lvs ...string) GaugeMetric { + return vc.GaugeVec.WithLabelValues(lvs...) +} + +// With is the wrapper of GaugeVec.With. 
+func (vc *GaugeVecWithContext) With(labels map[string]string) GaugeMetric {
+	return vc.GaugeVec.With(labels)
+}
diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go
new file mode 100644
index 00000000..e6884f35
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/histogram.go
@@ -0,0 +1,214 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// Histogram is our internal representation for our wrapping struct around prometheus
+// histograms. Histogram implements both kubeCollector and ObserverMetric.
+type Histogram struct {
+	ObserverMetric
+	*HistogramOpts
+	lazyMetric
+	selfCollector
+}
+
+// NewHistogram returns an object which is Histogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewHistogram(opts *HistogramOpts) *Histogram {
+	opts.StabilityLevel.setDefaults()
+
+	h := &Histogram{
+		HistogramOpts: opts,
+		lazyMetric:    lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	h.setPrometheusHistogram(noopMetric{})
+	h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return h
+}
+
+// setPrometheusHistogram sets the underlying prometheus.Histogram object, i.e. the thing that does the measurement.
+func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) {
+	h.ObserverMetric = histogram
+	h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *Histogram) DeprecatedVersion() *semver.Version {
+	return parseSemver(h.HistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Histogram object instantiation
+// and stores a reference to it
+func (h *Histogram) initializeMetric() {
+	h.HistogramOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus histogram.
+	h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *Histogram) initializeDeprecatedMetric() {
+	h.HistogramOpts.markDeprecated()
+	h.initializeMetric()
+}
+
+// WithContext allows the normal Histogram metric to pass in context. The context is no-op now.
+func (h *Histogram) WithContext(ctx context.Context) ObserverMetric {
+	return h.ObserverMetric
+}
+
+// HistogramVec is the internal representation of our wrapping struct around prometheus
+// histogramVecs.
+type HistogramVec struct {
+	*prometheus.HistogramVec
+	*HistogramOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewHistogramVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.HistogramVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily
+// instantiated, and only members extracted after registration will actually
+// measure anything.
+func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+	allowListLock.RLock()
+	if allowList, ok := labelValueAllowLists[fqName]; ok {
+		opts.LabelValueAllowLists = allowList
+	}
+	allowListLock.RUnlock()
+
+	v := &HistogramVec{
+		HistogramVec:   noopHistogramVec,
+		HistogramOpts:  opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *HistogramVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.HistogramOpts.DeprecatedVersion)
+}
+
+func (v *HistogramVec) initializeMetric() {
+	v.HistogramOpts.annotateStabilityLevel()
+	v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels)
+}
+
+func (v *HistogramVec) initializeDeprecatedMetric() {
+	v.HistogramOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
+
+// WithLabelValues returns the ObserverMetric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec
+// has been registered to a metrics registry.
+func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric {
+	if !v.IsCreated() {
+		return noop
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs)
+	}
+	return v.HistogramVec.WithLabelValues(lvs...)
+}
+
+// With returns the ObserverMetric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has
+// been registered to a metrics registry.
+func (v *HistogramVec) With(labels map[string]string) ObserverMetric {
+	if !v.IsCreated() {
+		return noop
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	}
+	return v.HistogramVec.With(labels)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
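+//
+// Editorial sketch, not upstream prose: for a vector created with labels
+// []string{"verb"}, v.Delete(map[string]string{"verb": "GET"}) removes only
+// that one series and reports whether it existed.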
+func (v *HistogramVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.HistogramVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *HistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.HistogramVec.Reset() +} + +// WithContext returns wrapped HistogramVec with context +func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext { + return &HistogramVecWithContext{ + ctx: ctx, + HistogramVec: v, + } +} + +// HistogramVecWithContext is the wrapper of HistogramVec with context. +type HistogramVecWithContext struct { + *HistogramVec + ctx context.Context +} + +// WithLabelValues is the wrapper of HistogramVec.WithLabelValues. +func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.HistogramVec.WithLabelValues(lvs...) +} + +// With is the wrapper of HistogramVec.With. +func (vc *HistogramVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.HistogramVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/http.go b/vendor/k8s.io/component-base/metrics/http.go new file mode 100644 index 00000000..2a0d249c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/http.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "io" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var ( + processStartedAt time.Time +) + +func init() { + processStartedAt = time.Now() +} + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // HTTPErrorOnError serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError promhttp.HandlerErrorHandling = iota + + // ContinueOnError ignore errors and try to serve as many metrics as possible. + // However, if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + + // PanicOnError panics upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. 
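+//
+// Editorial sketch, not upstream code: a minimal wiring of a registry to an
+// HTTP mux, assuming a registry obtained from NewKubeRegistry():
+//
+//	reg := NewKubeRegistry()
+//	mux := http.NewServeMux()
+//	mux.Handle("/metrics", HandlerFor(reg, HandlerOpts{ErrorHandling: ContinueOnError}))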
+type HandlerOpts promhttp.HandlerOpts + +func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts { + ho.ProcessStartTime = processStartedAt + return promhttp.HandlerOpts(*ho) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler { + return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) +} + +// HandlerWithReset return an http.Handler with Reset +func HandlerWithReset(reg KubeRegistry, opts HandlerOpts) http.Handler { + defaultHandler := promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodDelete { + reg.Reset() + io.WriteString(w, "metrics reset\n") + return + } + defaultHandler.ServeHTTP(w, r) + }) +} diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go new file mode 100644 index 00000000..11af3ae4 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Labels represents a collection of label name -> value mappings. +type Labels prometheus.Labels diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go new file mode 100644 index 00000000..64a430b7 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package legacyregistry + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "k8s.io/component-base/metrics" +) + +var ( + defaultRegistry = metrics.NewKubeRegistry() + // DefaultGatherer exposes the global registry gatherer + DefaultGatherer metrics.Gatherer = defaultRegistry + // Reset calls reset on the global registry + Reset = defaultRegistry.Reset + // MustRegister registers registerable metrics but uses the global registry. + MustRegister = defaultRegistry.MustRegister + // RawMustRegister registers prometheus collectors but uses the global registry, this + // bypasses the metric stability framework + // + // Deprecated + RawMustRegister = defaultRegistry.RawMustRegister + + // Register registers a collectable metric but uses the global registry + Register = defaultRegistry.Register + + // Registerer exposes the global registerer + Registerer = defaultRegistry.Registerer + + processStart time.Time +) + +func init() { + RawMustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + RawMustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll))) + defaultRegistry.RegisterMetaMetrics() + processStart = time.Now() +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +func Handler() http.Handler { + return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{ProcessStartTime: processStart})) +} + +// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes +// registry reset if the http method is DELETE. +func HandlerWithReset() http.Handler { + return promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, + metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{ProcessStartTime: processStart})) +} + +// CustomRegister registers a custom collector but uses the global registry. +func CustomRegister(c metrics.StableCollector) error { + err := defaultRegistry.CustomRegister(c) + + //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13) + _ = prometheus.Register(c) + + return err +} + +// CustomMustRegister registers custom collectors but uses the global registry. +func CustomMustRegister(cs ...metrics.StableCollector) { + defaultRegistry.CustomMustRegister(cs...) + + for _, c := range cs { + prometheus.MustRegister(c) + } +} diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go new file mode 100644 index 00000000..d68a98c4 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -0,0 +1,235 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "sync" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + promext "k8s.io/component-base/metrics/prometheusextension" + + "k8s.io/klog/v2" +) + +/* +kubeCollector extends the prometheus.Collector interface to allow customization of the metric +registration process. Defer metric initialization until Create() is called, which then +delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric +method call depending on whether the metric is deprecated or not. +*/ +type kubeCollector interface { + Collector + lazyKubeMetric + DeprecatedVersion() *semver.Version + // Each collector metric should provide an initialization function + // for both deprecated and non-deprecated variants of a metric. This + // is necessary since metric instantiation will be deferred + // until the metric is actually registered somewhere. + initializeMetric() + initializeDeprecatedMetric() +} + +/* +lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected +to lazily instantiate metrics (i.e defer metric instantiation until when +the Create() function is explicitly called). +*/ +type lazyKubeMetric interface { + Create(*semver.Version) bool + IsCreated() bool + IsHidden() bool + IsDeprecated() bool +} + +/* +lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric +registration time before instantiation. Add it as an anonymous field to a struct that +implements kubeCollector to get deferred registration behavior. You must call lazyInit +with the kubeCollector itself as an argument. +*/ +type lazyMetric struct { + fqName string + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + self kubeCollector + stabilityLevel StabilityLevel +} + +func (r *lazyMetric) IsCreated() bool { + r.createLock.RLock() + defer r.createLock.RUnlock() + return r.isCreated +} + +// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed +// to allow lazy initialization for. It should be invoked in the factory function which creates new +// kubeCollector type objects. +func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) { + r.fqName = fqName + r.self = self +} + +// preprocessMetric figures out whether the lazy metric should be hidden or not. +// This method takes a Version argument which should be the version of the binary in which +// this code is currently being executed. A metric can be hidden under two conditions: +// 1. if the metric is deprecated and is outside the grace period (i.e. has been +// deprecated for more than one release +// 2. if the metric is manually disabled via a CLI flag. +// +// Disclaimer: disabling a metric via a CLI flag has higher precedence than +// deprecation and will override show-hidden-metrics for the explicitly +// disabled metric. 
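+//
+// A worked example (editorial note): a metric with DeprecatedVersion "1.15.0"
+// running in a 1.15 binary is still shown, only marked deprecated; in a 1.16
+// binary it falls outside the grace period and is hidden, unless the operator
+// passes --show-hidden-metrics-for-version=1.15.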
+func (r *lazyMetric) preprocessMetric(version semver.Version) { + disabledMetricsLock.RLock() + defer disabledMetricsLock.RUnlock() + // disabling metrics is higher in precedence than showing hidden metrics + if _, ok := disabledMetrics[r.fqName]; ok { + r.isHidden = true + return + } + selfVersion := r.self.DeprecatedVersion() + if selfVersion == nil { + return + } + r.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + r.isDeprecated = true + } + + if ShouldShowHidden() { + klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric has been deprecated for more than one release, hiding.") + r.isHidden = true + } + }) +} + +func (r *lazyMetric) IsHidden() bool { + return r.isHidden +} + +func (r *lazyMetric) IsDeprecated() bool { + return r.isDeprecated +} + +// Create forces the initialization of metric which has been deferred until +// the point at which this method is invoked. This method will determine whether +// the metric is deprecated or hidden, no-opting if the metric should be considered +// hidden. Furthermore, this function no-opts and returns true if metric is already +// created. +func (r *lazyMetric) Create(version *semver.Version) bool { + if version != nil { + r.preprocessMetric(*version) + } + // let's not create if this metric is slated to be hidden + if r.IsHidden() { + return false + } + + r.createOnce.Do(func() { + r.createLock.Lock() + defer r.createLock.Unlock() + r.isCreated = true + if r.IsDeprecated() { + r.self.initializeDeprecatedMetric() + } else { + r.self.initializeMetric() + } + }) + sl := r.stabilityLevel + deprecatedV := r.self.DeprecatedVersion() + dv := "" + if deprecatedV != nil { + dv = deprecatedV.String() + } + registeredMetricsTotal.WithLabelValues(string(sl), dv).Inc() + return r.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (r *lazyMetric) ClearState() { + r.createLock.Lock() + defer r.createLock.Unlock() + + r.isDeprecated = false + r.isHidden = false + r.isCreated = false + r.markDeprecationOnce = sync.Once{} + r.createOnce = sync.Once{} +} + +// FQName returns the fully-qualified metric name of the collector. +func (r *lazyMetric) FQName() string { + return r.fqName +} + +/* +This code is directly lifted from the prometheus codebase. It's a convenience struct which +allows you satisfy the Collector interface automatically if you already satisfy the Metric interface. 
+
+For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120
+*/
+type selfCollector struct {
+	metric prometheus.Metric
+}
+
+func (c *selfCollector) initSelfCollection(m prometheus.Metric) {
+	c.metric = m
+}
+
+func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) {
+	ch <- c.metric.Desc()
+}
+
+func (c *selfCollector) Collect(ch chan<- prometheus.Metric) {
+	ch <- c.metric
+}
+
+// no-op vecs for convenience
+var noopCounterVec = &prometheus.CounterVec{}
+var noopHistogramVec = &prometheus.HistogramVec{}
+var noopTimingHistogramVec = &promext.TimingHistogramVec{}
+var noopGaugeVec = &prometheus.GaugeVec{}
+
+// just use a convenience struct for all the no-ops
+var noop = &noopMetric{}
+
+type noopMetric struct{}
+
+func (noopMetric) Inc()                              {}
+func (noopMetric) Add(float64)                       {}
+func (noopMetric) Dec()                              {}
+func (noopMetric) Set(float64)                       {}
+func (noopMetric) Sub(float64)                       {}
+func (noopMetric) Observe(float64)                   {}
+func (noopMetric) ObserveWithWeight(float64, uint64) {}
+func (noopMetric) SetToCurrentTime()                 {}
+func (noopMetric) Desc() *prometheus.Desc            { return nil }
+func (noopMetric) Write(*dto.Metric) error           { return nil }
+func (noopMetric) Describe(chan<- *prometheus.Desc)  {}
+func (noopMetric) Collect(chan<- prometheus.Metric)  {}
diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go
new file mode 100644
index 00000000..2c72cb48
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/options.go
@@ -0,0 +1,136 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/blang/semver/v4"
+	"github.com/spf13/pflag"
+
+	"k8s.io/component-base/version"
+)
+
+// Options has all parameters needed for exposing metrics from components
+type Options struct {
+	ShowHiddenMetricsForVersion string
+	DisabledMetrics             []string
+	AllowListMapping            map[string]string
+	AllowListMappingManifest    string
+}
+
+// NewOptions returns default metrics options
+func NewOptions() *Options {
+	return &Options{}
+}
+
+// Validate validates metrics flags options.
+func (o *Options) Validate() []error {
+	if o == nil {
+		return nil
+	}
+
+	var errs []error
+	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion)
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := validateAllowMetricLabel(o.AllowListMapping); err != nil {
+		errs = append(errs, err)
+	}
+
+	if len(errs) == 0 {
+		return nil
+	}
+	return errs
+}
+
+// AddFlags adds flags for exposing component metrics.
+func (o *Options) AddFlags(fs *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+	fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion,
+		"The previous version for which you want to show hidden metrics. "+
+			"Only the previous minor version is meaningful, other values will not be allowed. "+
+			"The format is <major>.<minor>, e.g.: '1.16'. "+
+			"The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, "+
+			"rather than being surprised when they are permanently removed in the release after that.")
+	fs.StringSliceVar(&o.DisabledMetrics,
+		"disabled-metrics",
+		o.DisabledMetrics,
+		"This flag provides an escape hatch for misbehaving metrics. "+
+			"You must provide the fully qualified metric name in order to disable it. "+
+			"Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.")
+	fs.StringToStringVar(&o.AllowListMapping, "allow-metric-labels", o.AllowListMapping,
+		"The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. "+
+			"The value's format is <allowed_value>,<allowed_value>..."+
+			"e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.")
+	fs.StringVar(&o.AllowListMappingManifest, "allow-metric-labels-manifest", o.AllowListMappingManifest,
+		"The path to the manifest file that contains the allow-list mapping. "+
+			"The format of the file is the same as the flag --allow-metric-labels. "+
+			"Note that the flag --allow-metric-labels will override the manifest file.")
+}
+
+// Apply applies parameters into global configuration of metrics.
+func (o *Options) Apply() {
+	if o == nil {
+		return
+	}
+	if len(o.ShowHiddenMetricsForVersion) > 0 {
+		SetShowHidden()
+	}
+	// set disabled metrics
+	for _, metricName := range o.DisabledMetrics {
+		SetDisabledMetric(metricName)
+	}
+	if o.AllowListMapping != nil {
+		SetLabelAllowListFromCLI(o.AllowListMapping)
+	} else if len(o.AllowListMappingManifest) > 0 {
+		SetLabelAllowListFromManifest(o.AllowListMappingManifest)
+	}
+}
+
+func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error {
+	if targetVersionStr == "" {
+		return nil
+	}
+
+	validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1)
+	if targetVersionStr != validVersionStr {
+		return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr)
+	}
+
+	return nil
+}
+
+func validateAllowMetricLabel(allowListMapping map[string]string) error {
+	if allowListMapping == nil {
+		return nil
+	}
+	metricNameRegex := `[a-zA-Z_:][a-zA-Z0-9_:]*`
+	labelRegex := `[a-zA-Z_][a-zA-Z0-9_]*`
+	for k := range allowListMapping {
+		reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex)
+		if reg.FindString(k) != k {
+			return fmt.Errorf("--allow-metric-labels must have a list of kv pair with format `metricName:labelName=labelValue, labelValue,...`")
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go
new file mode 100644
index 00000000..30dfd2e3
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/opts.go
@@ -0,0 +1,380 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package metrics + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v2" + + "k8s.io/apimachinery/pkg/util/sets" + promext "k8s.io/component-base/metrics/prometheusextension" + "k8s.io/klog/v2" +) + +var ( + labelValueAllowLists = map[string]*MetricLabelAllowList{} + allowListLock sync.RWMutex +) + +// KubeOpts is superset struct for prometheus.Opts. The prometheus Opts structure +// is purposefully not embedded here because that would change struct initialization +// in the manner which people are currently accustomed. +// +// Name must be set to a non-empty string. DeprecatedVersion is defined only +// if the metric for which this options applies is, in fact, deprecated. +type KubeOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + return prometheus.BuildFQName(namespace, subsystem, name) +} + +// StabilityLevel represents the API guarantees for a given defined metric. +type StabilityLevel string + +const ( + // INTERNAL metrics have no stability guarantees, as such, labels may + // be arbitrarily added/removed and the metric may be deleted at any time. + INTERNAL StabilityLevel = "INTERNAL" + // ALPHA metrics have no stability guarantees, as such, labels may + // be arbitrarily added/removed and the metric may be deleted at any time. + ALPHA StabilityLevel = "ALPHA" + // BETA metrics are governed by the deprecation policy outlined in by + // the control plane metrics stability KEP. + BETA StabilityLevel = "BETA" + // STABLE metrics are guaranteed not be mutated and removal is governed by + // the deprecation policy outlined in by the control plane metrics stability KEP. + STABLE StabilityLevel = "STABLE" +) + +// setDefaults takes 'ALPHA' in case of empty. +func (sl *StabilityLevel) setDefaults() { + switch *sl { + case "": + *sl = ALPHA + default: + // no-op, since we have a StabilityLevel already + } +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts KubeOpts + +// Modify help description on the metric description. +func (o *CounterOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *CounterOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. 
This will do more once we have a proper label abstraction +func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts KubeOpts + +// Modify help description on the metric description. +func (o *GaugeOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *GaugeOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. +func (o *HistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *HistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + } +} + +// TimingHistogramOpts bundles the options for creating a TimingHistogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type TimingHistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + InitialValue float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. 
+func (o *TimingHistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *TimingHistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *TimingHistogramOpts) toPromHistogramOpts() promext.TimingHistogramOpts { + return promext.TimingHistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + InitialValue: o.InitialValue, + } +} + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. +func (o *SummaryOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *SummaryOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// Deprecated: DefObjectives will not be used as the default objectives in +// v1.0.0 of the library. The default Summary will have no quantiles then. +var ( + defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + // we need to retain existing quantile behavior for backwards compatibility, + // so let's do what prometheus used to do prior to v1. 
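+	// Editorial note: defObjectives (declared above) mirrors client_golang's
+	// historical default, tracking the median to within a 5% rank error, the
+	// 90th percentile to within 1%, and the 99th percentile to within 0.1%.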
+ objectives := o.Objectives + if objectives == nil { + objectives = defObjectives + } + return prometheus.SummaryOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Objectives: objectives, + MaxAge: o.MaxAge, + AgeBuckets: o.AgeBuckets, + BufCap: o.BufCap, + } +} + +type MetricLabelAllowList struct { + labelToAllowList map[string]sets.String +} + +func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) { + for index, value := range labelValueList { + name := labelNameList[index] + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labelValueList[index] = "unexpected" + cardinalityEnforcementUnexpectedCategorizationsTotal.Inc() + } + } + } +} + +func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]string) { + for name, value := range labels { + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labels[name] = "unexpected" + cardinalityEnforcementUnexpectedCategorizationsTotal.Inc() + } + } + } +} + +func SetLabelAllowListFromCLI(allowListMapping map[string]string) { + allowListLock.Lock() + defer allowListLock.Unlock() + for metricLabelName, labelValues := range allowListMapping { + metricName := strings.Split(metricLabelName, ",")[0] + labelName := strings.Split(metricLabelName, ",")[1] + valueSet := sets.NewString(strings.Split(labelValues, ",")...) + + allowList, ok := labelValueAllowLists[metricName] + if ok { + allowList.labelToAllowList[labelName] = valueSet + } else { + labelToAllowList := make(map[string]sets.String) + labelToAllowList[labelName] = valueSet + labelValueAllowLists[metricName] = &MetricLabelAllowList{ + labelToAllowList, + } + } + } +} + +func SetLabelAllowListFromManifest(manifest string) { + allowListLock.Lock() + defer allowListLock.Unlock() + allowListMapping := make(map[string]string) + data, err := os.ReadFile(filepath.Clean(manifest)) + if err != nil { + klog.Errorf("Failed to read allow list manifest: %v", err) + return + } + err = yaml.Unmarshal(data, &allowListMapping) + if err != nil { + klog.Errorf("Failed to parse allow list manifest: %v", err) + return + } + SetLabelAllowListFromCLI(allowListMapping) +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go new file mode 100644 index 00000000..4b5e7693 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "k8s.io/klog/v2" +) + +var processStartTime = NewGaugeVec( + &GaugeOpts{ + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + StabilityLevel: ALPHA, + }, + []string{}, +) + +// RegisterProcessStartTime registers the process_start_time_seconds to +// a prometheus registry. 
This metric needs to be included to ensure counter +// data fidelity. +func RegisterProcessStartTime(registrationFunc func(Registerable) error) error { + start, err := getProcessStart() + if err != nil { + klog.Errorf("Could not get process start time, %v", err) + start = float64(time.Now().Unix()) + } + // processStartTime is a lazy metric which only get initialized after registered. + // so we need to register the metric first and then set the value for it + if err = registrationFunc(processStartTime); err != nil { + return err + } + + processStartTime.WithLabelValues().Set(start) + return nil +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/vendor/k8s.io/component-base/metrics/processstarttime_others.go new file mode 100644 index 00000000..a14cd883 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime_others.go @@ -0,0 +1,39 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "os" + + "github.com/prometheus/procfs" +) + +func getProcessStart() (float64, error) { + pid := os.Getpid() + p, err := procfs.NewProc(pid) + if err != nil { + return 0, err + } + + if stat, err := p.Stat(); err == nil { + return stat.StartTime() + } + return 0, err +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go new file mode 100644 index 00000000..7813115e --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "golang.org/x/sys/windows" +) + +func getProcessStart() (float64, error) { + processHandle := windows.CurrentProcess() + + var creationTime, exitTime, kernelTime, userTime windows.Filetime + if err := windows.GetProcessTimes(processHandle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return 0, err + } + return float64(creationTime.Nanoseconds() / 1e9), nil +} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go new file mode 100644 index 00000000..416e5eda --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheus/feature/metrics.go @@ -0,0 +1,53 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package feature + +import ( + "context" + + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +var ( + // featureInfo is a Prometheus Gauge metrics used for recording the enablement of a k8s feature. + featureInfo = k8smetrics.NewGaugeVec( + &k8smetrics.GaugeOpts{ + Namespace: "kubernetes", + Name: "feature_enabled", + Help: "This metric records the data about the stage and enablement of a k8s feature.", + StabilityLevel: k8smetrics.BETA, + }, + []string{"name", "stage"}, + ) +) + +func init() { + legacyregistry.MustRegister(featureInfo) +} + +func ResetFeatureInfoMetric() { + featureInfo.Reset() +} + +func RecordFeatureInfo(ctx context.Context, name string, stage string, enabled bool) { + value := 0.0 + if enabled { + value = 1.0 + } + featureInfo.WithContext(ctx).WithLabelValues(name, stage).Set(value) +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go new file mode 100644 index 00000000..be07977e --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go @@ -0,0 +1,189 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "errors" + "time" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// GaugeOps is the part of `prometheus.Gauge` that is relevant to +// instrumented code. +// This factoring should be in prometheus, analogous to the way +// it already factors out the Observer interface for histograms and summaries. +type GaugeOps interface { + // Set is the same as Gauge.Set + Set(float64) + // Inc is the same as Gauge.inc + Inc() + // Dec is the same as Gauge.Dec + Dec() + // Add is the same as Gauge.Add + Add(float64) + // Sub is the same as Gauge.Sub + Sub(float64) + + // SetToCurrentTime the same as Gauge.SetToCurrentTime + SetToCurrentTime() +} + +// A TimingHistogram tracks how long a `float64` variable spends in +// ranges defined by buckets. Time is counted in nanoseconds. The +// histogram's sum is the integral over time (in nanoseconds, from +// creation of the histogram) of the variable's value. 
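+//
+// Editorial illustration: if the tracked variable is set to 3 and next
+// updated two seconds later, the bucket containing 3 accumulates roughly 2e9
+// of weight (two seconds in nanoseconds) and the sum grows by about 3 * 2e9.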
+type TimingHistogram interface { + prometheus.Metric + prometheus.Collector + GaugeOps +} + +// TimingHistogramOpts is the parameters of the TimingHistogram constructor +type TimingHistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels prometheus.Labels + + // Buckets defines the buckets into which observations are + // accumulated. Each element in the slice is the upper + // inclusive bound of a bucket. The values must be sorted in + // strictly increasing order. There is no need to add a + // highest bucket with +Inf bound. The default value is + // prometheus.DefBuckets. + Buckets []float64 + + // The initial value of the variable. + InitialValue float64 +} + +// NewTimingHistogram creates a new TimingHistogram +func NewTimingHistogram(opts TimingHistogramOpts) (TimingHistogram, error) { + return NewTestableTimingHistogram(time.Now, opts) +} + +// NewTestableTimingHistogram creates a TimingHistogram that uses a mockable clock +func NewTestableTimingHistogram(nowFunc func() time.Time, opts TimingHistogramOpts) (TimingHistogram, error) { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapTimingHelp(opts.Help), + nil, + opts.ConstLabels, + ) + return newTimingHistogram(nowFunc, desc, opts) +} + +func wrapTimingHelp(given string) string { + return "EXPERIMENTAL: " + given +} + +func newTimingHistogram(nowFunc func() time.Time, desc *prometheus.Desc, opts TimingHistogramOpts, variableLabelValues ...string) (TimingHistogram, error) { + allLabelsM := prometheus.Labels{} + allLabelsS := prometheus.MakeLabelPairs(desc, variableLabelValues) + for _, pair := range allLabelsS { + if pair == nil || pair.Name == nil || pair.Value == nil { + return nil, errors.New("prometheus.MakeLabelPairs returned a nil") + } + allLabelsM[*pair.Name] = *pair.Value + } + weighted, err := newWeightedHistogram(desc, WeightedHistogramOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: opts.Name, + Help: opts.Help, + ConstLabels: allLabelsM, + Buckets: opts.Buckets, + }, variableLabelValues...) 
+ if err != nil { + return nil, err + } + return &timingHistogram{ + nowFunc: nowFunc, + weighted: weighted, + lastSetTime: nowFunc(), + value: opts.InitialValue, + }, nil +} + +type timingHistogram struct { + nowFunc func() time.Time + weighted *weightedHistogram + + // The following fields must only be accessed with weighted's lock held + + lastSetTime time.Time // identifies when value was last set + value float64 +} + +var _ TimingHistogram = &timingHistogram{} + +func (th *timingHistogram) Set(newValue float64) { + th.update(func(float64) float64 { return newValue }) +} + +func (th *timingHistogram) Inc() { + th.update(func(oldValue float64) float64 { return oldValue + 1 }) +} + +func (th *timingHistogram) Dec() { + th.update(func(oldValue float64) float64 { return oldValue - 1 }) +} + +func (th *timingHistogram) Add(delta float64) { + th.update(func(oldValue float64) float64 { return oldValue + delta }) +} + +func (th *timingHistogram) Sub(delta float64) { + th.update(func(oldValue float64) float64 { return oldValue - delta }) +} + +func (th *timingHistogram) SetToCurrentTime() { + th.update(func(oldValue float64) float64 { return th.nowFunc().Sub(time.Unix(0, 0)).Seconds() }) +} + +func (th *timingHistogram) update(updateFn func(float64) float64) { + th.weighted.lock.Lock() + defer th.weighted.lock.Unlock() + now := th.nowFunc() + delta := now.Sub(th.lastSetTime) + value := th.value + if delta > 0 { + th.weighted.observeWithWeightLocked(value, uint64(delta)) + th.lastSetTime = now + } + th.value = updateFn(value) +} + +func (th *timingHistogram) Desc() *prometheus.Desc { + return th.weighted.Desc() +} + +func (th *timingHistogram) Write(dest *dto.Metric) error { + th.Add(0) // account for time since last update + return th.weighted.Write(dest) +} + +func (th *timingHistogram) Describe(ch chan<- *prometheus.Desc) { + ch <- th.weighted.Desc() +} + +func (th *timingHistogram) Collect(ch chan<- prometheus.Metric) { + ch <- th +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go new file mode 100644 index 00000000..7af1a458 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go @@ -0,0 +1,111 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// GaugeVecOps is a bunch of Gauge that have the same +// Desc and are distinguished by the values for their variable labels. 
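+// For example (an illustrative sketch; the names are made up):
+//
+//	vec := NewTimingHistogramVec(TimingHistogramOpts{Name: "example_depth", Help: "Example."}, "shard")
+//	vec.WithLabelValues("shard0").Inc() // each label combination gets its own GaugeOps member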
+type GaugeVecOps interface { + GetMetricWith(prometheus.Labels) (GaugeOps, error) + GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) + With(prometheus.Labels) GaugeOps + WithLabelValues(...string) GaugeOps + CurryWith(prometheus.Labels) (GaugeVecOps, error) + MustCurryWith(prometheus.Labels) GaugeVecOps +} + +type TimingHistogramVec struct { + *prometheus.MetricVec +} + +var _ GaugeVecOps = &TimingHistogramVec{} +var _ prometheus.Collector = &TimingHistogramVec{} + +func NewTimingHistogramVec(opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec { + return NewTestableTimingHistogramVec(time.Now, opts, labelNames...) +} + +func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapTimingHelp(opts.Help), + labelNames, + opts.ConstLabels, + ) + return &TimingHistogramVec{ + MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric { + metric, err := newTimingHistogram(nowFunc, desc, opts, lvs...) + if err != nil { + panic(err) // like in prometheus.newHistogram + } + return metric + }), + } +} + +func (hv *TimingHistogramVec) GetMetricWith(labels prometheus.Labels) (GaugeOps, error) { + metric, err := hv.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(GaugeOps), err + } + return nil, err +} + +func (hv *TimingHistogramVec) GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) { + metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(GaugeOps), err + } + return nil, err +} + +func (hv *TimingHistogramVec) With(labels prometheus.Labels) GaugeOps { + h, err := hv.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +func (hv *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeOps { + h, err := hv.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +func (hv *TimingHistogramVec) CurryWith(labels prometheus.Labels) (GaugeVecOps, error) { + vec, err := hv.MetricVec.CurryWith(labels) + if vec != nil { + return &TimingHistogramVec{MetricVec: vec}, err + } + return nil, err +} + +func (hv *TimingHistogramVec) MustCurryWith(labels prometheus.Labels) GaugeVecOps { + vec, err := hv.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go new file mode 100644 index 00000000..a060019b --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go @@ -0,0 +1,203 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package prometheusextension + +import ( + "fmt" + "math" + "sort" + "sync" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// WeightedHistogram generalizes Histogram: each observation has +// an associated _weight_. For a given `x` and `N`, +// `1` call on `ObserveWithWeight(x, N)` has the same meaning as +// `N` calls on `ObserveWithWeight(x, 1)`. +// The weighted sum might differ slightly due to the use of +// floating point, although the implementation takes some steps +// to mitigate that. +// If every weight were 1, +// this would be the same as the existing Histogram abstraction. +type WeightedHistogram interface { + prometheus.Metric + prometheus.Collector + WeightedObserver +} + +// WeightedObserver generalizes the Observer interface. +type WeightedObserver interface { + // Set the variable to the given value with the given weight. + ObserveWithWeight(value float64, weight uint64) +} + +// WeightedHistogramOpts is the same as for an ordinary Histogram +type WeightedHistogramOpts = prometheus.HistogramOpts + +// NewWeightedHistogram creates a new WeightedHistogram +func NewWeightedHistogram(opts WeightedHistogramOpts) (WeightedHistogram, error) { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapWeightedHelp(opts.Help), + nil, + opts.ConstLabels, + ) + return newWeightedHistogram(desc, opts) +} + +func wrapWeightedHelp(given string) string { + return "EXPERIMENTAL: " + given +} + +func newWeightedHistogram(desc *prometheus.Desc, opts WeightedHistogramOpts, variableLabelValues ...string) (*weightedHistogram, error) { + if len(opts.Buckets) == 0 { + opts.Buckets = prometheus.DefBuckets + } + + for i, upperBound := range opts.Buckets { + if i < len(opts.Buckets)-1 { + if upperBound >= opts.Buckets[i+1] { + return nil, fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, opts.Buckets[i+1], + ) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + opts.Buckets = opts.Buckets[:i] + } + } + } + upperBounds := make([]float64, len(opts.Buckets)) + copy(upperBounds, opts.Buckets) + + return &weightedHistogram{ + desc: desc, + variableLabelValues: variableLabelValues, + upperBounds: upperBounds, + buckets: make([]uint64, len(upperBounds)+1), + hotCount: initialHotCount, + }, nil +} + +type weightedHistogram struct { + desc *prometheus.Desc + variableLabelValues []string + upperBounds []float64 // exclusive of +Inf + + lock sync.Mutex // applies to all the following + + // buckets is longer by one than upperBounds. + // For 0 <= idx < len(upperBounds), buckets[idx] holds the + // accumulated time.Duration that value has been <= + // upperBounds[idx] but not <= upperBounds[idx-1]. + // buckets[len(upperBounds)] holds the accumulated + // time.Duration when value fit in no other bucket. + buckets []uint64 + + // sumHot + sumCold is the weighted sum of value. + // Rather than risk loss of precision in one + // float64, we do this sum hierarchically. Many successive + // increments are added into sumHot; once in a while + // the magnitude of sumHot is compared to the magnitude + // of sumCold and, if the ratio is high enough, + // sumHot is transferred into sumCold. + sumHot float64 + sumCold float64 + + transferThreshold float64 // = math.Abs(sumCold) / 2^26 (that's about half of the bits of precision in a float64) + + // hotCount is used to decide when to consider dumping sumHot into sumCold. 
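+	// For example (illustrative numbers): with sumCold = 1e9 the transfer
+	// threshold is about 14.9 (1e9 / 2^26); once |sumHot| crosses it,
+	// sumHot is folded into sumCold and reset to zero.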
+ // hotCount counts upward from initialHotCount to zero. + hotCount int +} + +// initialHotCount is the negative of the number of terms +// that are summed into sumHot before considering whether +// to transfer to sumCold. This only has to be big enough +// to make the extra floating point operations occur in a +// distinct minority of cases. +const initialHotCount = -15 + +var _ WeightedHistogram = &weightedHistogram{} +var _ prometheus.Metric = &weightedHistogram{} +var _ prometheus.Collector = &weightedHistogram{} + +func (sh *weightedHistogram) ObserveWithWeight(value float64, weight uint64) { + idx := sort.SearchFloat64s(sh.upperBounds, value) + sh.lock.Lock() + defer sh.lock.Unlock() + sh.updateLocked(idx, value, weight) +} + +func (sh *weightedHistogram) observeWithWeightLocked(value float64, weight uint64) { + idx := sort.SearchFloat64s(sh.upperBounds, value) + sh.updateLocked(idx, value, weight) +} + +func (sh *weightedHistogram) updateLocked(idx int, value float64, weight uint64) { + sh.buckets[idx] += weight + newSumHot := sh.sumHot + float64(weight)*value + sh.hotCount++ + if sh.hotCount >= 0 { + sh.hotCount = initialHotCount + if math.Abs(newSumHot) > sh.transferThreshold { + newSumCold := sh.sumCold + newSumHot + sh.sumCold = newSumCold + sh.transferThreshold = math.Abs(newSumCold / 67108864) + sh.sumHot = 0 + return + } + } + sh.sumHot = newSumHot +} + +func (sh *weightedHistogram) Desc() *prometheus.Desc { + return sh.desc +} + +func (sh *weightedHistogram) Write(dest *dto.Metric) error { + count, sum, buckets := func() (uint64, float64, map[float64]uint64) { + sh.lock.Lock() + defer sh.lock.Unlock() + nBounds := len(sh.upperBounds) + buckets := make(map[float64]uint64, nBounds) + var count uint64 + for idx, upperBound := range sh.upperBounds { + count += sh.buckets[idx] + buckets[upperBound] = count + } + count += sh.buckets[nBounds] + return count, sh.sumHot + sh.sumCold, buckets + }() + metric, err := prometheus.NewConstHistogram(sh.desc, count, sum, buckets, sh.variableLabelValues...) + if err != nil { + return err + } + return metric.Write(dest) +} + +func (sh *weightedHistogram) Describe(ch chan<- *prometheus.Desc) { + ch <- sh.desc +} + +func (sh *weightedHistogram) Collect(ch chan<- prometheus.Metric) { + ch <- sh +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go new file mode 100644 index 00000000..2ca95f0a --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go @@ -0,0 +1,106 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// WeightedObserverVec is a bunch of WeightedObservers that have the same +// Desc and are distinguished by the values for their variable labels. 
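+// For example (an illustrative sketch; the names are made up):
+//
+//	vec := NewWeightedHistogramVec(WeightedHistogramOpts{Name: "example_seconds", Help: "Example."}, "verb")
+//	vec.WithLabelValues("GET").ObserveWithWeight(0.25, 3) // counts as three observations of 0.25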
+type WeightedObserverVec interface { + GetMetricWith(prometheus.Labels) (WeightedObserver, error) + GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) + With(prometheus.Labels) WeightedObserver + WithLabelValues(...string) WeightedObserver + CurryWith(prometheus.Labels) (WeightedObserverVec, error) + MustCurryWith(prometheus.Labels) WeightedObserverVec +} + +// WeightedHistogramVec implements WeightedObserverVec +type WeightedHistogramVec struct { + *prometheus.MetricVec +} + +var _ WeightedObserverVec = &WeightedHistogramVec{} +var _ prometheus.Collector = &WeightedHistogramVec{} + +func NewWeightedHistogramVec(opts WeightedHistogramOpts, labelNames ...string) *WeightedHistogramVec { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapWeightedHelp(opts.Help), + labelNames, + opts.ConstLabels, + ) + return &WeightedHistogramVec{ + MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric { + metric, err := newWeightedHistogram(desc, opts, lvs...) + if err != nil { + panic(err) // like in prometheus.newHistogram + } + return metric + }), + } +} + +func (hv *WeightedHistogramVec) GetMetricWith(labels prometheus.Labels) (WeightedObserver, error) { + metric, err := hv.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(WeightedObserver), err + } + return nil, err +} + +func (hv *WeightedHistogramVec) GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) { + metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(WeightedObserver), err + } + return nil, err +} + +func (hv *WeightedHistogramVec) With(labels prometheus.Labels) WeightedObserver { + h, err := hv.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +func (hv *WeightedHistogramVec) WithLabelValues(lvs ...string) WeightedObserver { + h, err := hv.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +func (hv *WeightedHistogramVec) CurryWith(labels prometheus.Labels) (WeightedObserverVec, error) { + vec, err := hv.MetricVec.CurryWith(labels) + if vec != nil { + return &WeightedHistogramVec{MetricVec: vec}, err + } + return nil, err +} + +func (hv *WeightedHistogramVec) MustCurryWith(labels prometheus.Labels) WeightedObserverVec { + vec, err := hv.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go new file mode 100644 index 00000000..203813e8 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -0,0 +1,394 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+	"k8s.io/component-base/version"
+)
+
+var (
+	showHiddenOnce      sync.Once
+	disabledMetricsLock sync.RWMutex
+	showHidden          atomic.Bool
+	registries          []*kubeRegistry // stores all registries created by NewKubeRegistry()
+	registriesLock      sync.RWMutex
+	disabledMetrics     = map[string]struct{}{}
+
+	registeredMetricsTotal = NewCounterVec(
+		&CounterOpts{
+			Name:           "registered_metrics_total",
+			Help:           "The count of registered metrics broken down by stability level and deprecation version.",
+			StabilityLevel: BETA,
+		},
+		[]string{"stability_level", "deprecated_version"},
+	)
+
+	disabledMetricsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "disabled_metrics_total",
+			Help:           "The count of disabled metrics.",
+			StabilityLevel: BETA,
+		},
+	)
+
+	hiddenMetricsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "hidden_metrics_total",
+			Help:           "The count of hidden metrics.",
+			StabilityLevel: BETA,
+		},
+	)
+
+	cardinalityEnforcementUnexpectedCategorizationsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "cardinality_enforcement_unexpected_categorizations_total",
+			Help:           "The count of unexpected categorizations during cardinality enforcement.",
+			StabilityLevel: ALPHA,
+		},
+	)
+)
+
+// shouldHide is used to check whether a specific metric with a deprecated version
+// should be hidden, according to the metrics deprecation lifecycle.
+func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool {
+	guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor))
+	if err != nil {
+		panic("failed to make version from current version")
+	}
+
+	if deprecatedVersion.LT(guardVersion) {
+		return true
+	}
+
+	return false
+}
+
+// ValidateShowHiddenMetricsVersion checks that the version for which hidden
+// metrics should be shown is valid.
+func ValidateShowHiddenMetricsVersion(v string) []error {
+	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v)
+	if err != nil {
+		return []error{err}
+	}
+
+	return nil
+}
+
+func SetDisabledMetric(name string) {
+	disabledMetricsLock.Lock()
+	defer disabledMetricsLock.Unlock()
+	disabledMetrics[name] = struct{}{}
+	disabledMetricsTotal.Inc()
+}
+
+// SetShowHidden will enable showing hidden metrics. This is a no-op
+// after the initial call.
+func SetShowHidden() {
+	showHiddenOnce.Do(func() {
+		showHidden.Store(true)
+
+		// re-register collectors that have been hidden during earlier registration.
+		for _, r := range registries {
+			r.enableHiddenCollectors()
+			r.enableHiddenStableCollectors()
+		}
+	})
+}
+
+// ShouldShowHidden returns whether showing hidden deprecated metrics
+// is enabled. While the primary use case for this is internal (to determine
+// registration behavior), this can also be used for introspection.
+func ShouldShowHidden() bool {
+	return showHidden.Load()
+}
+
+// Registerable is an interface for a collector metric which we
+// will register with KubeRegistry.
+type Registerable interface {
+	prometheus.Collector
+
+	// Create will mark deprecated state for the collector
+	Create(version *semver.Version) bool
+
+	// ClearState will clear all the states marked by Create.
+	ClearState()
+
+	// FQName returns the fully-qualified metric name of the collector.
+	FQName() string
+}
+
+type resettable interface {
+	Reset()
+}
+
+// KubeRegistry is an interface which implements a subset of prometheus.Registerer and
+// prometheus.Gatherer interfaces
+type KubeRegistry interface {
+	// Deprecated
+	RawMustRegister(...prometheus.Collector)
+	// CustomRegister is our internal variant of Prometheus registry.Register
+	CustomRegister(c StableCollector) error
+	// CustomMustRegister is our internal variant of Prometheus registry.MustRegister
+	CustomMustRegister(cs ...StableCollector)
+	// Register conforms to Prometheus registry.Register
+	Register(Registerable) error
+	// MustRegister conforms to Prometheus registry.MustRegister
+	MustRegister(...Registerable)
+	// Unregister conforms to Prometheus registry.Unregister
+	Unregister(collector Collector) bool
+	// Gather conforms to Prometheus gatherer.Gather
+	Gather() ([]*dto.MetricFamily, error)
+	// Reset invokes the Reset() function on all items in the registry
+	// which are added as resettables.
+	Reset()
+	// RegisterMetaMetrics registers metrics about the number of registered metrics.
+	RegisterMetaMetrics()
+	// Registerer exposes the underlying prometheus registerer
+	Registerer() prometheus.Registerer
+	// Gatherer exposes the underlying prometheus gatherer
+	Gatherer() prometheus.Gatherer
+}
+
+// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization
+// the kubernetes binary version information is loaded into the registry object, so that
+// automatic behavior can be configured for metric versioning.
+type kubeRegistry struct {
+	PromRegistry
+	version              semver.Version
+	hiddenCollectors     map[string]Registerable // stores all collectors that have been hidden
+	stableCollectors     []StableCollector       // stores all stable collectors
+	hiddenCollectorsLock sync.RWMutex
+	stableCollectorsLock sync.RWMutex
+	resetLock            sync.RWMutex
+	resettables          []resettable
+}
+
+// Register registers a new Collector to be included in metrics
+// collection. It returns an error if the descriptors provided by the
+// Collector are invalid or if they — in combination with descriptors of
+// already registered Collectors — do not fulfill the consistency and
+// uniqueness criteria described in the documentation of metric.Desc.
+func (kr *kubeRegistry) Register(c Registerable) error {
+	if c.Create(&kr.version) {
+		defer kr.addResettable(c)
+		return kr.PromRegistry.Register(c)
+	}
+
+	kr.trackHiddenCollector(c)
+	return nil
+}
+
+// Registerer exposes the underlying prometheus.Registerer
+func (kr *kubeRegistry) Registerer() prometheus.Registerer {
+	return kr.PromRegistry
+}
+
+// Gatherer exposes the underlying prometheus.Gatherer
+func (kr *kubeRegistry) Gatherer() prometheus.Gatherer {
+	return kr.PromRegistry
+}
+
+// MustRegister works like Register but registers any number of
+// Collectors and panics upon the first registration that causes an
+// error.
+func (kr *kubeRegistry) MustRegister(cs ...Registerable) {
+	metrics := make([]prometheus.Collector, 0, len(cs))
+	for _, c := range cs {
+		if c.Create(&kr.version) {
+			metrics = append(metrics, c)
+			kr.addResettable(c)
+		} else {
+			kr.trackHiddenCollector(c)
+		}
+	}
+	kr.PromRegistry.MustRegister(metrics...)
+}
+
+// CustomRegister registers a new custom collector.
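+//
+// A hypothetical usage sketch (illustrative only; newSomeStableCollector is a
+// made-up constructor):
+//
+//	var sc StableCollector = newSomeStableCollector()
+//	if err := registry.CustomRegister(sc); err != nil {
+//		// descriptors were invalid or already registered
+//	}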
+func (kr *kubeRegistry) CustomRegister(c StableCollector) error { + kr.trackStableCollectors(c) + defer kr.addResettable(c) + if c.Create(&kr.version, c) { + return kr.PromRegistry.Register(c) + } + return nil +} + +// CustomMustRegister works like CustomRegister but registers any number of +// StableCollectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) { + kr.trackStableCollectors(cs...) + collectors := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version, c) { + kr.addResettable(c) + collectors = append(collectors, c) + } + } + kr.PromRegistry.MustRegister(collectors...) +} + +// RawMustRegister takes a native prometheus.Collector and registers the collector +// to the registry. This bypasses metrics safety checks, so should only be used +// to register custom prometheus collectors. +// +// Deprecated +func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) { + kr.PromRegistry.MustRegister(cs...) + for _, c := range cs { + kr.addResettable(c) + } +} + +// addResettable will automatically add our metric to our reset +// list if it satisfies the interface +func (kr *kubeRegistry) addResettable(i interface{}) { + kr.resetLock.Lock() + defer kr.resetLock.Unlock() + if resettable, ok := i.(resettable); ok { + kr.resettables = append(kr.resettables, resettable) + } +} + +// Unregister unregisters the Collector that equals the Collector passed +// in as an argument. (Two Collectors are considered equal if their +// Describe method yields the same set of descriptors.) The function +// returns whether a Collector was unregistered. Note that an unchecked +// Collector cannot be unregistered (as its Describe method does not +// yield any descriptor). +func (kr *kubeRegistry) Unregister(collector Collector) bool { + return kr.PromRegistry.Unregister(collector) +} + +// Gather calls the Collect method of the registered Collectors and then +// gathers the collected metrics into a lexicographically sorted slice +// of uniquely named MetricFamily protobufs. Gather ensures that the +// returned slice is valid and self-consistent so that it can be used +// for valid exposition. As an exception to the strict consistency +// requirements described for metric.Desc, Gather will tolerate +// different sets of label names for metrics of the same metric family. +func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) { + return kr.PromRegistry.Gather() +} + +// trackHiddenCollector stores all hidden collectors. +func (kr *kubeRegistry) trackHiddenCollector(c Registerable) { + kr.hiddenCollectorsLock.Lock() + defer kr.hiddenCollectorsLock.Unlock() + + kr.hiddenCollectors[c.FQName()] = c + hiddenMetricsTotal.Inc() +} + +// trackStableCollectors stores all custom collectors. +func (kr *kubeRegistry) trackStableCollectors(cs ...StableCollector) { + kr.stableCollectorsLock.Lock() + defer kr.stableCollectorsLock.Unlock() + + kr.stableCollectors = append(kr.stableCollectors, cs...) +} + +// enableHiddenCollectors will re-register all of the hidden collectors. +func (kr *kubeRegistry) enableHiddenCollectors() { + if len(kr.hiddenCollectors) == 0 { + return + } + + kr.hiddenCollectorsLock.Lock() + cs := make([]Registerable, 0, len(kr.hiddenCollectors)) + + for _, c := range kr.hiddenCollectors { + c.ClearState() + cs = append(cs, c) + } + + kr.hiddenCollectors = make(map[string]Registerable) + kr.hiddenCollectorsLock.Unlock() + kr.MustRegister(cs...) 
+}
+
+// enableHiddenStableCollectors re-registers stable collectors that contain one or more hidden metrics.
+// Since a metric cannot be registered twice, each collector has to be unregistered first and then registered again.
+func (kr *kubeRegistry) enableHiddenStableCollectors() {
+	if len(kr.stableCollectors) == 0 {
+		return
+	}
+
+	kr.stableCollectorsLock.Lock()
+
+	cs := make([]StableCollector, 0, len(kr.stableCollectors))
+	for _, c := range kr.stableCollectors {
+		if len(c.HiddenMetrics()) > 0 {
+			kr.Unregister(c) // unregistering must happen before clearing state, otherwise no metrics would be unregistered
+			c.ClearState()
+			cs = append(cs, c)
+		}
+	}
+
+	kr.stableCollectors = nil
+	kr.stableCollectorsLock.Unlock()
+	kr.CustomMustRegister(cs...)
+}
+
+// Reset invokes Reset on all metrics that are resettable.
+func (kr *kubeRegistry) Reset() {
+	kr.resetLock.RLock()
+	defer kr.resetLock.RUnlock()
+	for _, r := range kr.resettables {
+		r.Reset()
+	}
+}
+
+// BuildVersion is a helper function that can be easily mocked.
+var BuildVersion = version.Get
+
+func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry {
+	r := &kubeRegistry{
+		PromRegistry:     prometheus.NewRegistry(),
+		version:          parseVersion(v),
+		hiddenCollectors: make(map[string]Registerable),
+		resettables:      make([]resettable, 0),
+	}
+
+	registriesLock.Lock()
+	defer registriesLock.Unlock()
+	registries = append(registries, r)
+
+	return r
+}
+
+// NewKubeRegistry creates a new vanilla Registry
+func NewKubeRegistry() KubeRegistry {
+	r := newKubeRegistry(BuildVersion())
+	return r
+}
+
+func (r *kubeRegistry) RegisterMetaMetrics() {
+	r.MustRegister(registeredMetricsTotal)
+	r.MustRegister(disabledMetricsTotal)
+	r.MustRegister(hiddenMetricsTotal)
+	r.MustRegister(cardinalityEnforcementUnexpectedCategorizationsTotal)
+}
diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go
new file mode 100644
index 00000000..d4042164
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/summary.go
@@ -0,0 +1,226 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	DefAgeBuckets = prometheus.DefAgeBuckets
+	DefBufCap     = prometheus.DefBufCap
+	DefMaxAge     = prometheus.DefMaxAge
+)
+
+// Summary is our internal representation for our wrapping struct around prometheus
+// summaries. Summary implements both kubeCollector and ObserverMetric
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type Summary struct {
+	ObserverMetric
+	*SummaryOpts
+	lazyMetric
+	selfCollector
+}
+
+// NewSummary returns an object which is Summary-like. However, nothing
+// will be measured until the summary is registered somewhere.
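+//
+// For example (an illustrative sketch; the names and values are made up):
+//
+//	s := NewSummary(&SummaryOpts{Name: "example_latency_seconds", Help: "Example latencies."})
+//	registry.MustRegister(s) // observations are no-ops until this happens
+//	s.Observe(0.42)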
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummary(opts *SummaryOpts) *Summary {
+	opts.StabilityLevel.setDefaults()
+
+	s := &Summary{
+		SummaryOpts: opts,
+		lazyMetric:  lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	s.setPrometheusSummary(noopMetric{})
+	s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return s
+}
+
+// setPrometheusSummary sets the underlying prometheus Summary object, i.e. the thing that does the measurement.
+func (s *Summary) setPrometheusSummary(summary prometheus.Summary) {
+	s.ObserverMetric = summary
+	s.initSelfCollection(summary)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (s *Summary) DeprecatedVersion() *semver.Version {
+	return parseSemver(s.SummaryOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Summary object instantiation
+// and stores a reference to it
+func (s *Summary) initializeMetric() {
+	s.SummaryOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus summary.
+	s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation
+// but modifies the Help description prior to object instantiation.
+func (s *Summary) initializeDeprecatedMetric() {
+	s.SummaryOpts.markDeprecated()
+	s.initializeMetric()
+}
+
+// WithContext allows the normal Summary metric to pass in context. The context is a no-op for now.
+func (s *Summary) WithContext(ctx context.Context) ObserverMetric {
+	return s.ObserverMetric
+}
+
+// SummaryVec is the internal representation of our wrapping struct around prometheus
+// summaryVecs.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type SummaryVec struct {
+	*prometheus.SummaryVec
+	*SummaryOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewSummaryVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.SummaryVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily instantiated,
+// and only members extracted after
+// registration will actually measure anything.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+	allowListLock.RLock()
+	if allowList, ok := labelValueAllowLists[fqName]; ok {
+		opts.LabelValueAllowLists = allowList
+	}
+	allowListLock.RUnlock()
+
+	v := &SummaryVec{
+		SummaryOpts:    opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *SummaryVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.SummaryOpts.DeprecatedVersion)
+}
+
+func (v *SummaryVec) initializeMetric() {
+	v.SummaryOpts.annotateStabilityLevel()
+	v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels)
+}
+
+func (v *SummaryVec) initializeDeprecatedMetric() {
+	v.SummaryOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec +// has been registered to a metrics registry. +func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.SummaryVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has +// been registered to a metrics registry. +func (v *SummaryVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.SummaryVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *SummaryVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.SummaryVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *SummaryVec) Reset() { + if !v.IsCreated() { + return + } + + v.SummaryVec.Reset() +} + +// WithContext returns wrapped SummaryVec with context +func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext { + return &SummaryVecWithContext{ + ctx: ctx, + SummaryVec: v, + } +} + +// SummaryVecWithContext is the wrapper of SummaryVec with context. +type SummaryVecWithContext struct { + *SummaryVec + ctx context.Context +} + +// WithLabelValues is the wrapper of SummaryVec.WithLabelValues. +func (vc *SummaryVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.SummaryVec.WithLabelValues(lvs...) +} + +// With is the wrapper of SummaryVec.With. +func (vc *SummaryVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.SummaryVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/timing_histogram.go b/vendor/k8s.io/component-base/metrics/timing_histogram.go new file mode 100644 index 00000000..a0f0b253 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/timing_histogram.go @@ -0,0 +1,270 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"time"
+
+	"github.com/blang/semver/v4"
+	promext "k8s.io/component-base/metrics/prometheusextension"
+)
+
+// PrometheusTimingHistogram is the abstraction of the underlying histogram
+// that we want to promote from the wrapper.
+type PrometheusTimingHistogram interface {
+	GaugeMetric
+}
+
+// TimingHistogram is our internal representation for our wrapping struct around
+// timing histograms. It implements both kubeCollector and GaugeMetric
+type TimingHistogram struct {
+	PrometheusTimingHistogram
+	*TimingHistogramOpts
+	nowFunc func() time.Time
+	lazyMetric
+	selfCollector
+}
+
+var _ GaugeMetric = &TimingHistogram{}
+var _ Registerable = &TimingHistogram{}
+var _ kubeCollector = &TimingHistogram{}
+
+// NewTimingHistogram returns an object which is TimingHistogram-like. However, nothing
+// will be measured until the histogram is registered somewhere.
+func NewTimingHistogram(opts *TimingHistogramOpts) *TimingHistogram {
+	return NewTestableTimingHistogram(time.Now, opts)
+}
+
+// NewTestableTimingHistogram adds injection of the clock
+func NewTestableTimingHistogram(nowFunc func() time.Time, opts *TimingHistogramOpts) *TimingHistogram {
+	opts.StabilityLevel.setDefaults()
+
+	h := &TimingHistogram{
+		TimingHistogramOpts: opts,
+		nowFunc:             nowFunc,
+		lazyMetric:          lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	h.setPrometheusHistogram(noopMetric{})
+	h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return h
+}
+
+// setPrometheusHistogram sets the underlying timing histogram object, i.e. the thing that does the measurement.
+func (h *TimingHistogram) setPrometheusHistogram(histogram promext.TimingHistogram) {
+	h.PrometheusTimingHistogram = histogram
+	h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *TimingHistogram) DeprecatedVersion() *semver.Version {
+	return parseSemver(h.TimingHistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual promext.TimingHistogram object instantiation
+// and stores a reference to it
+func (h *TimingHistogram) initializeMetric() {
+	h.TimingHistogramOpts.annotateStabilityLevel()
+	// this actually creates the underlying timing histogram.
+	histogram, err := promext.NewTestableTimingHistogram(h.nowFunc, h.TimingHistogramOpts.toPromHistogramOpts())
+	if err != nil {
+		panic(err) // handle as for regular histograms
+	}
+	h.setPrometheusHistogram(histogram)
+}
+
+// initializeDeprecatedMetric invokes the actual promext.TimingHistogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *TimingHistogram) initializeDeprecatedMetric() {
+	h.TimingHistogramOpts.markDeprecated()
+	h.initializeMetric()
+}
+
+// WithContext allows the normal TimingHistogram metric to pass in context. The context is a no-op for now.
+func (h *TimingHistogram) WithContext(ctx context.Context) GaugeMetric {
+	return h.PrometheusTimingHistogram
+}
+
+// TimingHistogramVec is the internal representation of our wrapping struct around prometheus
+// TimingHistogramVecs.
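+//
+// A hypothetical usage sketch (illustrative only; the names are made up):
+//
+//	vec := NewTimingHistogramVec(&TimingHistogramOpts{Name: "example_level", Help: "Example."}, []string{"pool"})
+//	registry.MustRegister(vec)
+//	vec.WithLabelValues("fast").Set(3) // before registration this would be a noop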
+type TimingHistogramVec struct { + *promext.TimingHistogramVec + *TimingHistogramOpts + nowFunc func() time.Time + lazyMetric + originalLabels []string +} + +var _ GaugeVecMetric = &TimingHistogramVec{} +var _ Registerable = &TimingHistogramVec{} +var _ kubeCollector = &TimingHistogramVec{} + +// NewTimingHistogramVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces +// and wraps an underlying promext.TimingHistogramVec object. Note well the way that +// behavior depends on registration and whether this is hidden. +func NewTimingHistogramVec(opts *TimingHistogramOpts, labels []string) *TimingHistogramVec { + return NewTestableTimingHistogramVec(time.Now, opts, labels) +} + +// NewTestableTimingHistogramVec adds injection of the clock. +func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts *TimingHistogramOpts, labels []string) *TimingHistogramVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + v := &TimingHistogramVec{ + TimingHistogramVec: noopTimingHistogramVec, + TimingHistogramOpts: opts, + nowFunc: nowFunc, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + v.lazyInit(v, fqName) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *TimingHistogramVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.TimingHistogramOpts.DeprecatedVersion) +} + +func (v *TimingHistogramVec) initializeMetric() { + v.TimingHistogramOpts.annotateStabilityLevel() + v.TimingHistogramVec = promext.NewTestableTimingHistogramVec(v.nowFunc, v.TimingHistogramOpts.toPromHistogramOpts(), v.originalLabels...) +} + +func (v *TimingHistogramVec) initializeDeprecatedMetric() { + v.TimingHistogramOpts.markDeprecated() + v.initializeMetric() +} + +// WithLabelValuesChecked, if called before this vector has been registered in +// at least one registry, will return a noop gauge and +// an error that passes ErrIsNotRegistered. +// If called on a hidden vector, +// will return a noop gauge and a nil error. +// If called with a syntactic problem in the labels, will +// return a noop gauge and an error about the labels. +// If none of the above apply, this method will return +// the appropriate vector member and a nil error. +func (v *TimingHistogramVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + ops, err := v.TimingHistogramVec.GetMetricWithLabelValues(lvs...) + if err != nil { + return noop, err + } + return ops.(GaugeMetric), err +} + +// WithLabelValues calls WithLabelValuesChecked +// and handles errors as follows. +// An error that passes ErrIsNotRegistered is ignored +// and the noop gauge is returned; +// all other errors cause a panic. +func (v *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeMetric { + ans, err := v.WithLabelValuesChecked(lvs...) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +// WithChecked, if called before this vector has been registered in +// at least one registry, will return a noop gauge and +// an error that passes ErrIsNotRegistered. 
+// If called on a hidden vector,
+// will return a noop gauge and a nil error.
+// If called with a syntactic problem in the labels, will
+// return a noop gauge and an error about the labels.
+// If none of the above apply, this method will return
+// the appropriate vector member and a nil error.
+func (v *TimingHistogramVec) WithChecked(labels map[string]string) (GaugeMetric, error) {
+	if !v.IsCreated() {
+		if v.IsHidden() {
+			return noop, nil
+		}
+		return noop, errNotRegistered
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	}
+	ops, err := v.TimingHistogramVec.GetMetricWith(labels)
+	if err != nil {
+		return noop, err
+	}
+	return ops.(GaugeMetric), err
+}
+
+// With calls WithChecked and handles errors as follows.
+// An error that passes ErrIsNotRegistered is ignored
+// and the noop gauge is returned;
+// all other errors cause a panic.
+func (v *TimingHistogramVec) With(labels map[string]string) GaugeMetric {
+	ans, err := v.WithChecked(labels)
+	if err == nil || ErrIsNotRegistered(err) {
+		return ans
+	}
+	panic(err)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *TimingHistogramVec) Delete(labels map[string]string) bool {
+	if !v.IsCreated() {
+		return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+	}
+	return v.TimingHistogramVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *TimingHistogramVec) Reset() {
+	if !v.IsCreated() {
+		return
+	}
+
+	v.TimingHistogramVec.Reset()
+}
+
+// InterfaceWithContext returns a wrapped TimingHistogramVec with context
+func (v *TimingHistogramVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric {
+	return &TimingHistogramVecWithContext{
+		ctx:                ctx,
+		TimingHistogramVec: v,
+	}
+}
+
+// TimingHistogramVecWithContext is the wrapper of TimingHistogramVec with context.
+// Currently the context is ignored.
+type TimingHistogramVecWithContext struct {
+	*TimingHistogramVec
+	ctx context.Context
+}
diff --git a/vendor/k8s.io/component-base/metrics/value.go b/vendor/k8s.io/component-base/metrics/value.go
new file mode 100644
index 00000000..4a405048
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/value.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
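+// For example (illustrative), passing GaugeValue to NewLazyConstMetric below
+// yields a gauge-typed constant sample.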
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+func (vt *ValueType) toPromValueType() prometheus.ValueType {
+	return prometheus.ValueType(*vt)
+}
+
+// NewLazyConstMetric is a helper of prometheus.MustNewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, the metric will not be created.
+func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+	if desc.IsHidden() {
+		return nil
+	}
+	return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
+// NewConstMetric is a helper of prometheus.NewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, the metric will not be created.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+	if desc.IsHidden() {
+		return nil, nil
+	}
+	return prometheus.NewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
+// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp.
+//
+// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(),
+// otherwise, no stability guarantees would be offered.
+func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric {
+	if m == nil {
+		return nil
+	}
+
+	return prometheus.NewMetricWithTimestamp(t, m)
+}
diff --git a/vendor/k8s.io/component-base/metrics/version.go b/vendor/k8s.io/component-base/metrics/version.go
new file mode 100644
index 00000000..f963e205
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/version.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "k8s.io/component-base/version"
+
+var (
+	buildInfo = NewGaugeVec(
+		&GaugeOpts{
+			Name:           "kubernetes_build_info",
+			Help:           "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.",
+			StabilityLevel: ALPHA,
+		},
+		[]string{"major", "minor", "git_version", "git_commit", "git_tree_state", "build_date", "go_version", "compiler", "platform"},
+	)
+)
+
+// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus
+func RegisterBuildInfo(r KubeRegistry) {
+	info := version.Get()
+	r.MustRegister(buildInfo)
+	buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1)
+}
diff --git a/vendor/k8s.io/component-base/metrics/version_parser.go b/vendor/k8s.io/component-base/metrics/version_parser.go
new file mode 100644
index 00000000..102e108e
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/version_parser.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver/v4" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +const ( + versionRegexpString = `^v(\d+\.\d+\.\d+)` +) + +var ( + versionRe = regexp.MustCompile(versionRegexpString) +) + +func parseSemver(s string) *semver.Version { + if s != "" { + sv := semver.MustParse(s) + return &sv + } + return nil +} +func parseVersion(ver apimachineryversion.Info) semver.Version { + matches := versionRe.FindAllStringSubmatch(ver.String(), -1) + + if len(matches) != 1 { + panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String())) + } + return semver.MustParse(matches[0][1]) +} diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go new file mode 100644 index 00000000..679590aa --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/wrappers.go @@ -0,0 +1,167 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "errors" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// This file contains a series of interfaces which we explicitly define for +// integrating with prometheus. We redefine the interfaces explicitly here +// so that we can prevent breakage if methods are ever added to prometheus +// variants of them. + +// Collector defines a subset of prometheus.Collector interface methods +type Collector interface { + Describe(chan<- *prometheus.Desc) + Collect(chan<- prometheus.Metric) +} + +// Metric defines a subset of prometheus.Metric interface methods +type Metric interface { + Desc() *prometheus.Desc + Write(*dto.Metric) error +} + +// CounterMetric is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. + +// CounterMetric is an interface which defines a subset of the interface provided by prometheus.Counter +type CounterMetric interface { + Inc() + Add(float64) +} + +// CounterVecMetric is an interface which prometheus.CounterVec satisfies. 
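+// For example (illustrative; requestsTotal is a hypothetical instance):
+//
+//	requestsTotal.WithLabelValues("GET").Inc()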
+type CounterVecMetric interface { + WithLabelValues(...string) CounterMetric + With(prometheus.Labels) CounterMetric +} + +// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge +type GaugeMetric interface { + Set(float64) + Inc() + Dec() + Add(float64) + Write(out *dto.Metric) error + SetToCurrentTime() +} + +// GaugeVecMetric is a collection of Gauges that differ only in label values. +type GaugeVecMetric interface { + // Default Prometheus Vec behavior is that member extraction results in creation of a new element + // if one with the unique label values is not found in the underlying stored metricMap. + // This means that if this function is called but the underlying metric is not registered + // (which means it will never be exposed externally nor consumed), the metric would exist in memory + // for perpetuity (i.e. throughout application lifecycle). + // + // For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 + // + // In contrast, the Vec behavior in this package is that member extraction before registration + // returns a permanent noop object. + + // WithLabelValuesChecked, if called before this vector has been registered in + // at least one registry, will return a noop gauge and + // an error that passes ErrIsNotRegistered. + // If called on a hidden vector, + // will return a noop gauge and a nil error. + // If called with a syntactic problem in the labels, will + // return a noop gauge and an error about the labels. + // If none of the above apply, this method will return + // the appropriate vector member and a nil error. + WithLabelValuesChecked(labelValues ...string) (GaugeMetric, error) + + // WithLabelValues calls WithLabelValuesChecked + // and handles errors as follows. + // An error that passes ErrIsNotRegistered is ignored + // and the noop gauge is returned; + // all other errors cause a panic. + WithLabelValues(labelValues ...string) GaugeMetric + + // WithChecked, if called before this vector has been registered in + // at least one registry, will return a noop gauge and + // an error that passes ErrIsNotRegistered. + // If called on a hidden vector, + // will return a noop gauge and a nil error. + // If called with a syntactic problem in the labels, will + // return a noop gauge and an error about the labels. + // If none of the above apply, this method will return + // the appropriate vector member and a nil error. + WithChecked(labels map[string]string) (GaugeMetric, error) + + // With calls WithChecked and handles errors as follows. + // An error that passes ErrIsNotRegistered is ignored + // and the noop gauge is returned; + // all other errors cause a panic. + With(labels map[string]string) GaugeMetric + + // Delete asserts that the vec should have no member for the given label set. + // The returned bool indicates whether there was a change. + // The return will certainly be `false` if the given label set has the wrong + // set of label names. + Delete(map[string]string) bool + + // Reset removes all the members + Reset() +} + +// ObserverMetric captures individual observations. 
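+// For example (illustrative; latencySeconds is a hypothetical instance):
+//
+//	latencySeconds.Observe(0.042)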
+type ObserverMetric interface { + Observe(float64) +} + +// PromRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type PromRegistry interface { + Register(prometheus.Collector) error + MustRegister(...prometheus.Collector) + Unregister(prometheus.Collector) bool + Gather() ([]*dto.MetricFamily, error) +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. +type Gatherer interface { + prometheus.Gatherer +} + +// Registerer is the interface for the part of a registry in charge of registering +// the collected metrics. +type Registerer interface { + prometheus.Registerer +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +func ErrIsNotRegistered(err error) bool { + return err == errNotRegistered +} + +var errNotRegistered = errors.New("metric vec is not registered yet") diff --git a/vendor/k8s.io/component-base/tracing/OWNERS b/vendor/k8s.io/component-base/tracing/OWNERS new file mode 100644 index 00000000..b26e7a4d --- /dev/null +++ b/vendor/k8s.io/component-base/tracing/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-instrumentation-approvers +reviewers: + - sig-instrumentation-reviewers +labels: + - sig/instrumentation diff --git a/vendor/k8s.io/component-base/tracing/api/v1/config.go b/vendor/k8s.io/component-base/tracing/api/v1/config.go new file mode 100644 index 00000000..ae9bbbfc --- /dev/null +++ b/vendor/k8s.io/component-base/tracing/api/v1/config.go @@ -0,0 +1,88 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "net/url" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/component-base/featuregate" +) + +var ( + maxSamplingRatePerMillion = int32(1000000) +) + +// ValidateTracingConfiguration validates the tracing configuration +func ValidateTracingConfiguration(traceConfig *TracingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if traceConfig == nil { + return allErrs + } + if traceConfig.SamplingRatePerMillion != nil { + allErrs = append(allErrs, validateSamplingRate(*traceConfig.SamplingRatePerMillion, fldPath.Child("samplingRatePerMillion"))...) + } + if traceConfig.Endpoint != nil { + allErrs = append(allErrs, validateEndpoint(*traceConfig.Endpoint, fldPath.Child("endpoint"))...) 
+ } + return allErrs +} + +func validateSamplingRate(rate int32, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if rate < 0 { + errs = append(errs, field.Invalid( + fldPath, rate, + "sampling rate must be positive", + )) + } + if rate > maxSamplingRatePerMillion { + errs = append(errs, field.Invalid( + fldPath, rate, + "sampling rate per million must be less than or equal to one million", + )) + } + return errs +} + +func validateEndpoint(endpoint string, fldPath *field.Path) field.ErrorList { + errs := field.ErrorList{} + if !strings.Contains(endpoint, "//") { + endpoint = "dns://" + endpoint + } + url, err := url.Parse(endpoint) + if err != nil { + errs = append(errs, field.Invalid( + fldPath, endpoint, + err.Error(), + )) + return errs + } + switch url.Scheme { + case "dns": + case "unix": + case "unix-abstract": + default: + errs = append(errs, field.Invalid( + fldPath, endpoint, + fmt.Sprintf("unsupported scheme: %v. Options are none, dns, unix, or unix-abstract. See https://github.com/grpc/grpc/blob/master/doc/naming.md", url.Scheme), + )) + } + return errs +} diff --git a/vendor/k8s.io/component-base/tracing/api/v1/doc.go b/vendor/k8s.io/component-base/tracing/api/v1/doc.go new file mode 100644 index 00000000..ffe36aef --- /dev/null +++ b/vendor/k8s.io/component-base/tracing/api/v1/doc.go @@ -0,0 +1,29 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package v1 contains the configuration API for tracing. +// +// The intention is to only have a single version of this API, potentially with +// new fields added over time in a backwards-compatible manner. Fields for +// alpha or beta features are allowed as long as they are defined so that not +// changing the defaults leaves those features disabled. +// +// The "v1" package name is just a reminder that API compatibility rules apply, +// not an indication of the stability of all features covered by it. + +package v1 // import "k8s.io/component-base/tracing/api/v1" diff --git a/vendor/k8s.io/component-base/tracing/api/v1/types.go b/vendor/k8s.io/component-base/tracing/api/v1/types.go new file mode 100644 index 00000000..a59d5640 --- /dev/null +++ b/vendor/k8s.io/component-base/tracing/api/v1/types.go @@ -0,0 +1,32 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// TracingConfiguration provides versioned configuration for OpenTelemetry tracing clients. 
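+//
+// A typical serialized form looks like this sketch (both values are
+// illustrative; each field may also be left unset):
+//
+//	endpoint: localhost:4317
+//	samplingRatePerMillion: 10000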
+type TracingConfiguration struct { + // Endpoint of the collector this component will report traces to. + // The connection is insecure, and does not currently support TLS. + // Recommended is unset, and endpoint is the otlp grpc default, localhost:4317. + // +optional + Endpoint *string `json:"endpoint,omitempty"` + + // SamplingRatePerMillion is the number of samples to collect per million spans. + // Recommended is unset. If unset, sampler respects its parent span's sampling + // rate, but otherwise never samples. + // +optional + SamplingRatePerMillion *int32 `json:"samplingRatePerMillion,omitempty"` +} diff --git a/vendor/k8s.io/component-base/tracing/api/v1/zz_generated.deepcopy.go b/vendor/k8s.io/component-base/tracing/api/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..2afc6811 --- /dev/null +++ b/vendor/k8s.io/component-base/tracing/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,48 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TracingConfiguration) DeepCopyInto(out *TracingConfiguration) { + *out = *in + if in.Endpoint != nil { + in, out := &in.Endpoint, &out.Endpoint + *out = new(string) + **out = **in + } + if in.SamplingRatePerMillion != nil { + in, out := &in.SamplingRatePerMillion, &out.SamplingRatePerMillion + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TracingConfiguration. +func (in *TracingConfiguration) DeepCopy() *TracingConfiguration { + if in == nil { + return nil + } + out := new(TracingConfiguration) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/component-base/tracing/tracing.go b/vendor/k8s.io/component-base/tracing/tracing.go new file mode 100644 index 00000000..bdf6f377 --- /dev/null +++ b/vendor/k8s.io/component-base/tracing/tracing.go @@ -0,0 +1,98 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tracing + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + utiltrace "k8s.io/utils/trace" +) + +const instrumentationScope = "k8s.io/component-base/tracing" + +// Start creates spans using both OpenTelemetry, and the k8s.io/utils/trace package. 
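+// A typical call site pairs it with a deferred End (the span name and
+// attribute below are illustrative):
+//
+//	ctx, span := tracing.Start(ctx, "sync-pod", attribute.String("pod", name))
+//	defer span.End(100 * time.Millisecond)
+//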
+// It only creates an OpenTelemetry span if the incoming context already includes a span. +func Start(ctx context.Context, name string, attributes ...attribute.KeyValue) (context.Context, *Span) { + // If the incoming context already includes an OpenTelemetry span, create a child span with the provided name and attributes. + // If the caller is not using OpenTelemetry, or has tracing disabled (e.g. with a component-specific feature flag), this is a noop. + ctx, otelSpan := trace.SpanFromContext(ctx).TracerProvider().Tracer(instrumentationScope).Start(ctx, name, trace.WithAttributes(attributes...)) + // If there is already a utiltrace span in the context, use that as our parent span. + utilSpan := utiltrace.FromContext(ctx).Nest(name, attributesToFields(attributes)...) + // Set the trace as active in the context so that subsequent Start calls create nested spans. + return utiltrace.ContextWithTrace(ctx, utilSpan), &Span{ + otelSpan: otelSpan, + utilSpan: utilSpan, + } +} + +// Span is a component part of a trace. It represents a single named +// and timed operation of a workflow being observed. +// This Span is a combination of an OpenTelemetry and k8s.io/utils/trace span +// to facilitate the migration to OpenTelemetry. +type Span struct { + otelSpan trace.Span + utilSpan *utiltrace.Trace +} + +// AddEvent adds a point-in-time event with a name and attributes. +func (s *Span) AddEvent(name string, attributes ...attribute.KeyValue) { + s.otelSpan.AddEvent(name, trace.WithAttributes(attributes...)) + if s.utilSpan != nil { + s.utilSpan.Step(name, attributesToFields(attributes)...) + } +} + +// End ends the span, and logs if the span duration is greater than the logThreshold. +func (s *Span) End(logThreshold time.Duration) { + s.otelSpan.End() + if s.utilSpan != nil { + s.utilSpan.LogIfLong(logThreshold) + } +} + +// RecordError will record err as an exception span event for this span. +// If this span is not being recorded or err is nil then this method does nothing. +func (s *Span) RecordError(err error, attributes ...attribute.KeyValue) { + s.otelSpan.RecordError(err, trace.WithAttributes(attributes...)) +} + +func attributesToFields(attributes []attribute.KeyValue) []utiltrace.Field { + fields := make([]utiltrace.Field, len(attributes)) + for i := range attributes { + attr := attributes[i] + fields[i] = utiltrace.Field{Key: string(attr.Key), Value: attr.Value.AsInterface()} + } + return fields +} + +// SpanFromContext returns the *Span from the current context. It is composed of the active +// OpenTelemetry and k8s.io/utils/trace spans. +func SpanFromContext(ctx context.Context) *Span { + return &Span{ + otelSpan: trace.SpanFromContext(ctx), + utilSpan: utiltrace.FromContext(ctx), + } +} + +// ContextWithSpan returns a context with the Span included in the context. +func ContextWithSpan(ctx context.Context, s *Span) context.Context { + return trace.ContextWithSpan(utiltrace.ContextWithTrace(ctx, s.utilSpan), s.otelSpan) +} diff --git a/vendor/k8s.io/component-base/tracing/utils.go b/vendor/k8s.io/component-base/tracing/utils.go new file mode 100644 index 00000000..dde7a5b2 --- /dev/null +++ b/vendor/k8s.io/component-base/tracing/utils.go @@ -0,0 +1,134 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tracing + +import ( + "context" + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + oteltrace "go.opentelemetry.io/otel/trace" + noopoteltrace "go.opentelemetry.io/otel/trace/noop" + + "k8s.io/client-go/transport" + "k8s.io/component-base/tracing/api/v1" +) + +// TracerProvider is an OpenTelemetry TracerProvider which can be shut down +type TracerProvider interface { + oteltrace.TracerProvider + Shutdown(context.Context) error +} + +type noopTracerProvider struct { + oteltrace.TracerProvider +} + +func (n *noopTracerProvider) Shutdown(context.Context) error { + return nil +} + +func NewNoopTracerProvider() TracerProvider { + return &noopTracerProvider{TracerProvider: noopoteltrace.NewTracerProvider()} +} + +// NewProvider creates a TracerProvider in a component, and enforces recommended tracing behavior +func NewProvider(ctx context.Context, + tracingConfig *v1.TracingConfiguration, + addedOpts []otlptracegrpc.Option, + resourceOpts []resource.Option, +) (TracerProvider, error) { + if tracingConfig == nil { + return NewNoopTracerProvider(), nil + } + opts := append([]otlptracegrpc.Option{}, addedOpts...) + if tracingConfig.Endpoint != nil { + opts = append(opts, otlptracegrpc.WithEndpoint(*tracingConfig.Endpoint)) + } + opts = append(opts, otlptracegrpc.WithInsecure()) + exporter, err := otlptracegrpc.New(ctx, opts...) + if err != nil { + return nil, err + } + res, err := resource.New(ctx, resourceOpts...) + if err != nil { + return nil, err + } + + // sampler respects parent span's sampling rate or + // otherwise never samples. + sampler := sdktrace.NeverSample() + // Or, emit spans for a fraction of transactions + if tracingConfig.SamplingRatePerMillion != nil && *tracingConfig.SamplingRatePerMillion > 0 { + sampler = sdktrace.TraceIDRatioBased(float64(*tracingConfig.SamplingRatePerMillion) / float64(1000000)) + } + // batch span processor to aggregate spans before export. 
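+ // (The batch processor buffers finished spans and exports them
+ // asynchronously, keeping span creation off the caller's hot path;
+ // the simple span processor would instead export synchronously.)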
+ bsp := sdktrace.NewBatchSpanProcessor(exporter) + tp := sdktrace.NewTracerProvider( + sdktrace.WithSampler(sdktrace.ParentBased(sampler)), + sdktrace.WithSpanProcessor(bsp), + sdktrace.WithResource(res), + ) + return tp, nil +} + +// WithTracing adds tracing to requests if the incoming request is sampled +func WithTracing(handler http.Handler, tp oteltrace.TracerProvider, spanName string) http.Handler { + opts := []otelhttp.Option{ + otelhttp.WithPropagators(Propagators()), + otelhttp.WithTracerProvider(tp), + } + wrappedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Add the http.target attribute to the otelhttp span + // Workaround for https://github.com/open-telemetry/opentelemetry-go-contrib/issues/3743 + if r.URL != nil { + oteltrace.SpanFromContext(r.Context()).SetAttributes(semconv.HTTPTarget(r.URL.RequestURI())) + } + handler.ServeHTTP(w, r) + }) + // With Noop TracerProvider, the otelhttp still handles context propagation. + // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough + return otelhttp.NewHandler(wrappedHandler, spanName, opts...) +} + +// WrapperFor can be used to add tracing to a *rest.Config. +// Example usage: +// tp := NewProvider(...) +// config, _ := rest.InClusterConfig() +// config.Wrap(WrapperFor(tp)) +// kubeclient, _ := clientset.NewForConfig(config) +func WrapperFor(tp oteltrace.TracerProvider) transport.WrapperFunc { + return func(rt http.RoundTripper) http.RoundTripper { + opts := []otelhttp.Option{ + otelhttp.WithPropagators(Propagators()), + otelhttp.WithTracerProvider(tp), + } + // With Noop TracerProvider, the otelhttp still handles context propagation. + // See https://github.com/open-telemetry/opentelemetry-go/tree/main/example/passthrough + return otelhttp.NewTransport(rt, opts...) + } +} + +// Propagators returns the recommended set of propagators. +func Propagators() propagation.TextMapPropagator { + return propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go index 5789e67a..1b758ab2 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -22,7 +22,7 @@ import ( "strings" openapi_v2 "github.com/google/gnostic-models/openapiv2" - "gopkg.in/yaml.v2" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) func newSchemaError(path *Path, format string, a ...interface{}) error { diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/errors/.gitignore b/vendor/k8s.io/kube-openapi/pkg/validation/errors/.gitignore new file mode 100644 index 00000000..dd91ed6a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/errors/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/errors/LICENSE b/vendor/k8s.io/kube-openapi/pkg/validation/errors/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/errors/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
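The tracing helpers vendored above fit together with a small amount of wiring: NewProvider builds an OTLP-backed TracerProvider from the versioned configuration, WithTracing instruments inbound HTTP handlers, and WrapperFor instruments outbound client-go traffic. A minimal sketch, assuming a populated v1.TracingConfiguration and leaving error and shutdown handling to the caller (the package name, function name, and span name are illustrative):

package tracingwiring

import (
	"context"
	"net/http"

	"k8s.io/client-go/rest"
	"k8s.io/component-base/tracing"
	tracingapi "k8s.io/component-base/tracing/api/v1"
)

// Wire builds a TracerProvider and applies it to both an inbound handler
// and an outbound client-go config.
func Wire(ctx context.Context, cfg *tracingapi.TracingConfiguration, h http.Handler) (http.Handler, *rest.Config, error) {
	tp, err := tracing.NewProvider(ctx, cfg, nil, nil) // no extra OTLP or resource options
	if err != nil {
		return nil, nil, err
	}
	// Inbound: create spans for sampled incoming requests.
	wrapped := tracing.WithTracing(h, tp, "nim-operator" /* illustrative span name */)
	// Outbound: propagate trace context on requests made through client-go.
	restCfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, nil, err
	}
	restCfg.Wrap(tracing.WrapperFor(tp))
	return wrapped, restCfg, nil
}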
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/errors/api.go b/vendor/k8s.io/kube-openapi/pkg/validation/errors/api.go new file mode 100644 index 00000000..e0b31004 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/errors/api.go @@ -0,0 +1,46 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "fmt" +) + +// Error represents a error interface all swagger framework errors implement +type Error interface { + error + Code() int32 +} + +type apiError struct { + code int32 + message string +} + +func (a *apiError) Error() string { + return a.message +} + +func (a *apiError) Code() int32 { + return a.code +} + +// New creates a new API error with a code and a message +func New(code int32, message string, args ...interface{}) Error { + if len(args) > 0 { + return &apiError{code, fmt.Sprintf(message, args...)} + } + return &apiError{code, message} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/errors/doc.go b/vendor/k8s.io/kube-openapi/pkg/validation/errors/doc.go new file mode 100644 index 00000000..af01190c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/errors/doc.go @@ -0,0 +1,26 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package errors provides an Error interface and several concrete types +implementing this interface to manage API errors and JSON-schema validation +errors. + +A middleware handler ServeError() is provided to serve the errors types +it defines. + +It is used throughout the various go-openapi toolkit libraries +(https://github.com/go-openapi). +*/ +package errors diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/errors/headers.go b/vendor/k8s.io/kube-openapi/pkg/validation/errors/headers.go new file mode 100644 index 00000000..3da85c36 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/errors/headers.go @@ -0,0 +1,44 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +// Validation represents a failure of a precondition +type Validation struct { + code int32 + Name string + In string + Value interface{} + Valid interface{} + message string + Values []interface{} +} + +func (e *Validation) Error() string { + return e.message +} + +// Code the error code +func (e *Validation) Code() int32 { + return e.code +} + +// ValidateName produces an error message name for an aliased property +func (e *Validation) ValidateName(name string) *Validation { + if e.Name == "" && name != "" { + e.Name = name + e.message = name + e.message + } + return e +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/errors/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/errors/schema.go new file mode 100644 index 00000000..65f133e9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/errors/schema.go @@ -0,0 +1,573 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "fmt" + "strings" +) + +const ( + invalidType = "%s is an invalid type name" + typeFail = "%s in %s must be of type %s" + typeFailWithData = "%s in %s must be of type %s: %q" + typeFailWithError = "%s in %s must be of type %s, because: %s" + requiredFail = "%s in %s is required" + tooLongMessage = "%s in %s should be at most %d chars long" + tooShortMessage = "%s in %s should be at least %d chars long" + patternFail = "%s in %s should match '%s'" + enumFail = "%s in %s should be one of %v" + multipleOfFail = "%s in %s should be a multiple of %v" + maxIncFail = "%s in %s should be less than or equal to %v" + maxExcFail = "%s in %s should be less than %v" + minIncFail = "%s in %s should be greater than or equal to %v" + minExcFail = "%s in %s should be greater than %v" + uniqueFail = "%s in %s shouldn't contain duplicates" + maxItemsFail = "%s in %s should have at most %d items" + minItemsFail = "%s in %s should have at least %d items" + typeFailNoIn = "%s must be of type %s" + typeFailWithDataNoIn = "%s must be of type %s: %q" + typeFailWithErrorNoIn = "%s must be of type %s, because: %s" + requiredFailNoIn = "%s is required" + tooLongMessageNoIn = "%s should be at most %d chars long" + tooShortMessageNoIn = "%s should be at least %d chars long" + patternFailNoIn = "%s should match '%s'" + enumFailNoIn = "%s should be one of %v" + multipleOfFailNoIn = "%s should be a multiple of %v" + maxIncFailNoIn = "%s should be less than or equal to %v" + maxExcFailNoIn = "%s should be less than %v" + minIncFailNoIn = "%s should be greater than or equal to %v" + minExcFailNoIn = "%s should be greater than %v" + uniqueFailNoIn = "%s shouldn't contain duplicates" + maxItemsFailNoIn = "%s should have at most %d items" + minItemsFailNoIn = "%s should have at least %d items" + noAdditionalItems = "%s in %s can't have additional items" + noAdditionalItemsNoIn = "%s can't have additional items" + tooFewProperties = 
"%s in %s should have at least %d properties" + tooFewPropertiesNoIn = "%s should have at least %d properties" + tooManyProperties = "%s in %s should have at most %d properties" + tooManyPropertiesNoIn = "%s should have at most %d properties" + unallowedProperty = "%s.%s in %s is a forbidden property" + unallowedPropertyNoIn = "%s.%s is a forbidden property" + failedAllPatternProps = "%s.%s in %s failed all pattern properties" + failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" + multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" +) + +// All code responses can be used to differentiate errors for different handling +// by the consuming program +const ( + // CompositeErrorCode remains 422 for backwards-compatibility + // and to separate it from validation errors with cause + CompositeErrorCode = 422 + // InvalidTypeCode is used for any subclass of invalid types + InvalidTypeCode = 600 + iota + RequiredFailCode + TooLongFailCode + TooShortFailCode + PatternFailCode + EnumFailCode + MultipleOfFailCode + MaxFailCode + MinFailCode + UniqueFailCode + MaxItemsFailCode + MinItemsFailCode + NoAdditionalItemsCode + TooFewPropertiesCode + TooManyPropertiesCode + UnallowedPropertyCode + FailedAllPatternPropsCode + MultipleOfMustBePositiveCode +) + +// CompositeError is an error that groups several errors together +type CompositeError struct { + Errors []error + code int32 + message string +} + +// Code for this error +func (c *CompositeError) Code() int32 { + return c.code +} + +func (c *CompositeError) Error() string { + if len(c.Errors) > 0 { + msgs := []string{c.message + ":"} + for _, e := range c.Errors { + msgs = append(msgs, e.Error()) + } + return strings.Join(msgs, "\n") + } + return c.message +} + +// CompositeValidationError an error to wrap a bunch of other errors +func CompositeValidationError(errors ...error) *CompositeError { + return &CompositeError{ + code: CompositeErrorCode, + Errors: append([]error{}, errors...), + message: "validation failure list", + } +} + +// FailedAllPatternProperties an error for when the property doesn't match a pattern +func FailedAllPatternProperties(name, in, key string) *Validation { + msg := fmt.Sprintf(failedAllPatternProps, name, key, in) + if in == "" { + msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key) + } + return &Validation{ + code: FailedAllPatternPropsCode, + Name: name, + In: in, + Value: key, + message: msg, + } +} + +// PropertyNotAllowed an error for when the property doesn't match a pattern +func PropertyNotAllowed(name, in, key string) *Validation { + msg := fmt.Sprintf(unallowedProperty, name, key, in) + if in == "" { + msg = fmt.Sprintf(unallowedPropertyNoIn, name, key) + } + return &Validation{ + code: UnallowedPropertyCode, + Name: name, + In: in, + Value: key, + message: msg, + } +} + +// TooFewProperties an error for an object with too few properties +func TooFewProperties(name, in string, minProperties, size int64) *Validation { + msg := fmt.Sprintf(tooFewProperties, name, in, minProperties) + if in == "" { + msg = fmt.Sprintf(tooFewPropertiesNoIn, name, minProperties) + } + return &Validation{ + code: TooFewPropertiesCode, + Name: name, + In: in, + Value: size, + Valid: minProperties, + message: msg, + } +} + +// TooManyProperties an error for an object with too many properties +func TooManyProperties(name, in string, maxProperties, size int64) *Validation { + msg := fmt.Sprintf(tooManyProperties, name, in, maxProperties) + if in == "" { + msg = 
fmt.Sprintf(tooManyPropertiesNoIn, name, maxProperties) + } + return &Validation{ + code: TooManyPropertiesCode, + Name: name, + In: in, + Value: size, + Valid: maxProperties, + message: msg, + } +} + +// AdditionalItemsNotAllowed an error for invalid additional items +func AdditionalItemsNotAllowed(name, in string) *Validation { + msg := fmt.Sprintf(noAdditionalItems, name, in) + if in == "" { + msg = fmt.Sprintf(noAdditionalItemsNoIn, name) + } + return &Validation{ + code: NoAdditionalItemsCode, + Name: name, + In: in, + message: msg, + } +} + +// InvalidCollectionFormat another flavor of invalid type error +func InvalidCollectionFormat(name, in, format string) *Validation { + return &Validation{ + code: InvalidTypeCode, + Name: name, + In: in, + Value: format, + message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name), + } +} + +// InvalidTypeName an error for when the type is invalid +func InvalidTypeName(typeName string) *Validation { + return &Validation{ + code: InvalidTypeCode, + Value: typeName, + message: fmt.Sprintf(invalidType, typeName), + } +} + +// InvalidType creates an error for when the type is invalid +func InvalidType(name, in, typeName string, value interface{}) *Validation { + var message string + + if in != "" { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) + default: + message = fmt.Sprintf(typeFail, name, in, typeName) + } + } else { + switch value.(type) { + case string: + message = fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) + case error: + message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) + default: + message = fmt.Sprintf(typeFailNoIn, name, typeName) + } + } + + return &Validation{ + code: InvalidTypeCode, + Name: name, + In: in, + Value: value, + message: message, + } + +} + +// DuplicateItems error for when an array contains duplicates +func DuplicateItems(name, in string) *Validation { + msg := fmt.Sprintf(uniqueFail, name, in) + if in == "" { + msg = fmt.Sprintf(uniqueFailNoIn, name) + } + return &Validation{ + code: UniqueFailCode, + Name: name, + In: in, + message: msg, + } +} + +// TooManyItems error for when an array contains too many items +func TooManyItems(name, in string, max int64, value interface{}) *Validation { + msg := fmt.Sprintf(maxItemsFail, name, in, max) + if in == "" { + msg = fmt.Sprintf(maxItemsFailNoIn, name, max) + } + + return &Validation{ + code: MaxItemsFailCode, + Name: name, + In: in, + Value: value, + Valid: max, + message: msg, + } +} + +// TooFewItems error for when an array contains too few items +func TooFewItems(name, in string, min int64, value interface{}) *Validation { + msg := fmt.Sprintf(minItemsFail, name, in, min) + if in == "" { + msg = fmt.Sprintf(minItemsFailNoIn, name, min) + } + return &Validation{ + code: MinItemsFailCode, + Name: name, + In: in, + Value: value, + Valid: min, + message: msg, + } +} + +// ExceedsMaximumInt error for when maxinum validation fails +func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, 
+ message: message, + } +} + +// ExceedsMaximumUint error for when maxinum validation fails +func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMaximum error for when maxinum validation fails +func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := maxIncFailNoIn + if exclusive { + m = maxExcFailNoIn + } + message = fmt.Sprintf(m, name, max) + } else { + m := maxIncFail + if exclusive { + m = maxExcFail + } + message = fmt.Sprintf(m, name, in, max) + } + return &Validation{ + code: MaxFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumInt error for when maxinum validation fails +func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimumUint error for when maxinum validation fails +func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// ExceedsMinimum error for when maxinum validation fails +func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation { + var message string + if in == "" { + m := minIncFailNoIn + if exclusive { + m = minExcFailNoIn + } + message = fmt.Sprintf(m, name, min) + } else { + m := minIncFail + if exclusive { + m = minExcFail + } + message = fmt.Sprintf(m, name, in, min) + } + return &Validation{ + code: MinFailCode, + Name: name, + In: in, + Value: value, + message: message, + } +} + +// NotMultipleOf error for when multiple of validation fails +func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) + } else { + msg = fmt.Sprintf(multipleOfFail, name, in, multiple) + } + return &Validation{ + code: MultipleOfFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// EnumFail error for when an enum validation fails +func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(enumFailNoIn, name, values) + } else { + msg = fmt.Sprintf(enumFail, name, in, values) + } + + return &Validation{ + code: EnumFailCode, + Name: name, + In: in, + Value: value, + Values: values, + message: msg, + } +} + +// Required error for when a value is missing +func 
Required(name, in string) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(requiredFailNoIn, name) + } else { + msg = fmt.Sprintf(requiredFail, name, in) + } + return &Validation{ + code: RequiredFailCode, + Name: name, + In: in, + message: msg, + } +} + +// TooLong error for when a string is too long +func TooLong(name, in string, max int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooLongMessageNoIn, name, max) + } else { + msg = fmt.Sprintf(tooLongMessage, name, in, max) + } + return &Validation{ + code: TooLongFailCode, + Name: name, + In: in, + Value: value, + Valid: max, + message: msg, + } +} + +// TooShort error for when a string is too short +func TooShort(name, in string, min int64, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(tooShortMessageNoIn, name, min) + } else { + msg = fmt.Sprintf(tooShortMessage, name, in, min) + } + + return &Validation{ + code: TooShortFailCode, + Name: name, + In: in, + Value: value, + Valid: min, + message: msg, + } +} + +// FailedPattern error for when a string fails a regex pattern match +// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. +func FailedPattern(name, in, pattern string, value interface{}) *Validation { + var msg string + if in == "" { + msg = fmt.Sprintf(patternFailNoIn, name, pattern) + } else { + msg = fmt.Sprintf(patternFail, name, in, pattern) + } + + return &Validation{ + code: PatternFailCode, + Name: name, + In: in, + Value: value, + message: msg, + } +} + +// MultipleOfMustBePositive error for when a +// multipleOf factor is negative +func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { + return &Validation{ + code: MultipleOfMustBePositiveCode, + Name: name, + In: in, + Value: factor, + message: fmt.Sprintf(multipleOfMustBePositive, name, factor), + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/.gitignore b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/.gitignore new file mode 100644 index 00000000..dd91ed6a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/.gitignore @@ -0,0 +1,2 @@ +secrets.yml +coverage.out diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/LICENSE b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson.go new file mode 100644 index 00000000..0b6380c8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strfmt + +import ( + bsonprim "k8s.io/kube-openapi/pkg/validation/strfmt/bson" +) + +func init() { + var id ObjectId + // register this format in the default registry + Default.Add("bsonobjectid", &id, IsBSONObjectID) +} + +// IsBSONObjectID returns true when the string is a valid BSON.ObjectId +func IsBSONObjectID(str string) bool { + _, err := bsonprim.ObjectIDFromHex(str) + return err == nil +} + +// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) +// +// swagger:strfmt bsonobjectid +type ObjectId bsonprim.ObjectID + +// NewObjectId creates a ObjectId from a Hex String +func NewObjectId(hex string) ObjectId { + oid, err := bsonprim.ObjectIDFromHex(hex) + if err != nil { + panic(err) + } + return ObjectId(oid) +} + +// MarshalText turns this instance into text +func (id ObjectId) MarshalText() ([]byte, error) { + oid := bsonprim.ObjectID(id) + if oid == bsonprim.NilObjectID { + return nil, nil + } + return []byte(oid.Hex()), nil +} + +// UnmarshalText hydrates this instance from text +func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on + if len(data) == 0 { + *id = ObjectId(bsonprim.NilObjectID) + return nil + } + oidstr := string(data) + oid, err := bsonprim.ObjectIDFromHex(oidstr) + if err != nil { + return err + } + *id = ObjectId(oid) + return nil +} + +func (id ObjectId) String() string { + return bsonprim.ObjectID(id).String() +} + +// MarshalJSON returns the ObjectId as JSON +func (id ObjectId) MarshalJSON() ([]byte, error) { + return bsonprim.ObjectID(id).MarshalJSON() +} + +// UnmarshalJSON sets the ObjectId from JSON +func (id *ObjectId) UnmarshalJSON(data []byte) error { + var obj bsonprim.ObjectID + if err := obj.UnmarshalJSON(data); err != nil { + return err + } + *id = ObjectId(obj) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (id *ObjectId) DeepCopyInto(out *ObjectId) { + *out = *id +} + +// DeepCopy copies the receiver into a new ObjectId. +func (id *ObjectId) DeepCopy() *ObjectId { + if id == nil { + return nil + } + out := new(ObjectId) + id.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson/objectid.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson/objectid.go new file mode 100644 index 00000000..824534b2 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/bson/objectid.go @@ -0,0 +1,122 @@ +// Copyright (C) MongoDB, Inc. 2017-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +// +// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer +// See THIRD-PARTY-NOTICES for original license terms. + +package bson + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" +) + +// ErrInvalidHex indicates that a hex string cannot be converted to an ObjectID. +var ErrInvalidHex = errors.New("the provided hex string is not a valid ObjectID") + +// ObjectID is the BSON ObjectID type. +type ObjectID [12]byte + +// NilObjectID is the zero value for ObjectID. +var NilObjectID ObjectID + +// Hex returns the hex encoding of the ObjectID as a string. +func (id ObjectID) Hex() string { + return hex.EncodeToString(id[:]) +} + +func (id ObjectID) String() string { + return fmt.Sprintf("ObjectID(%q)", id.Hex()) +} + +// IsZero returns true if id is the empty ObjectID. +func (id ObjectID) IsZero() bool { + return bytes.Equal(id[:], NilObjectID[:]) +} + +// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a +// valid ObjectID. +func ObjectIDFromHex(s string) (ObjectID, error) { + b, err := hex.DecodeString(s) + if err != nil { + return NilObjectID, err + } + + if len(b) != 12 { + return NilObjectID, ErrInvalidHex + } + + var oid [12]byte + copy(oid[:], b[:]) + + return oid, nil +} + +// MarshalJSON returns the ObjectID as a string +func (id ObjectID) MarshalJSON() ([]byte, error) { + return json.Marshal(id.Hex()) +} + +// UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 24 bytes long, it +// will be populated with the hex representation of the ObjectID. If the byte slice is twelve bytes +// long, it will be populated with the BSON representation of the ObjectID. This method also accepts empty strings and +// decodes them as NilObjectID. For any other inputs, an error will be returned. +func (id *ObjectID) UnmarshalJSON(b []byte) error { + // Ignore "null" to keep parity with the standard library. Decoding a JSON null into a non-pointer ObjectID field + // will leave the field unchanged. For pointer values, encoding/json will set the pointer to nil and will not + // enter the UnmarshalJSON hook. + if string(b) == "null" { + return nil + } + + var err error + switch len(b) { + case 12: + copy(id[:], b) + default: + // Extended JSON + var res interface{} + err := json.Unmarshal(b, &res) + if err != nil { + return err + } + str, ok := res.(string) + if !ok { + m, ok := res.(map[string]interface{}) + if !ok { + return errors.New("not an extended JSON ObjectID") + } + oid, ok := m["$oid"] + if !ok { + return errors.New("not an extended JSON ObjectID") + } + str, ok = oid.(string) + if !ok { + return errors.New("not an extended JSON ObjectID") + } + } + + // An empty string is not a valid ObjectID, but we treat it as a special value that decodes as NilObjectID. 
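+		// NOTE (editorial illustration, not part of the upstream file): at
+		// this point str holds the candidate hex digits, whether the input
+		// was a plain JSON string or the extended JSON object form. Each of
+		//
+		//	"507f1f77bcf86cd799439011"            (plain string)
+		//	{"$oid": "507f1f77bcf86cd799439011"}  (extended JSON)
+		//	""                                    (empty; mapped to NilObjectID below)
+		//
+		// reaches the length checks that follow.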
+ if len(str) == 0 { + copy(id[:], NilObjectID[:]) + return nil + } + + if len(str) != 24 { + return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str)) + } + + _, err = hex.Decode(id[:], []byte(str)) + if err != nil { + return err + } + } + + return err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/date.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/date.go new file mode 100644 index 00000000..74ce5b6c --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/date.go @@ -0,0 +1,103 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package strfmt + +import ( + "encoding/json" + "time" +) + +func init() { + d := Date{} + // register this format in the default registry + Default.Add("date", &d, IsDate) +} + +// IsDate returns true when the string is a valid date +func IsDate(str string) bool { + _, err := time.Parse(RFC3339FullDate, str) + return err == nil +} + +const ( + // RFC3339FullDate represents a full-date as specified by RFC3339 + // See: http://goo.gl/xXOvVd + RFC3339FullDate = "2006-01-02" +) + +// Date represents a date from the API +// +// swagger:strfmt date +type Date time.Time + +// String converts this date into a string +func (d Date) String() string { + return time.Time(d).Format(RFC3339FullDate) +} + +// UnmarshalText parses a text representation into a date type +func (d *Date) UnmarshalText(text []byte) error { + if len(text) == 0 { + return nil + } + dd, err := time.Parse(RFC3339FullDate, string(text)) + if err != nil { + return err + } + *d = Date(dd) + return nil +} + +// MarshalText serializes this date type to string +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// MarshalJSON returns the Date as JSON +func (d Date) MarshalJSON() ([]byte, error) { + return json.Marshal(time.Time(d).Format(RFC3339FullDate)) +} + +// UnmarshalJSON sets the Date from JSON +func (d *Date) UnmarshalJSON(data []byte) error { + if string(data) == jsonNull { + return nil + } + var strdate string + if err := json.Unmarshal(data, &strdate); err != nil { + return err + } + tt, err := time.Parse(RFC3339FullDate, strdate) + if err != nil { + return err + } + *d = Date(tt) + return nil +} + +// DeepCopyInto copies the receiver and writes its value into out. +func (d *Date) DeepCopyInto(out *Date) { + *out = *d +} + +// DeepCopy copies the receiver into a new Date. 
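+//
+// A minimal round-trip sketch for the Date type (editorial illustration,
+// assuming only the declarations in this file plus the standard library):
+//
+//	d := Date(time.Date(2024, 10, 24, 0, 0, 0, 0, time.UTC))
+//	b, _ := d.MarshalJSON()  // []byte(`"2024-10-24"`)
+//	var out Date
+//	_ = out.UnmarshalJSON(b) // out holds the same calendar date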
+func (d *Date) DeepCopy() *Date {
+	if d == nil {
+		return nil
+	}
+	out := new(Date)
+	d.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
new file mode 100644
index 00000000..e85b0f1b
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/strfmt/default.go
@@ -0,0 +1,1562 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package strfmt
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/mail"
+	"regexp"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+
+	netutils "k8s.io/utils/net"
+)
+
+const (
+	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
+	//  A string instance is valid against this attribute if it is a valid
+	//  representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
+	//  http://tools.ietf.org/html/rfc1034#section-3.5
+	//  <digit> ::= any one of the ten digits 0 through 9
+	//  var digit = /[0-9]/;
+	//  <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
+	//  var letter = /[a-zA-Z]/;
+	//  <let-dig> ::= <letter> | <digit>
+	//  var letDig = /[0-9a-zA-Z]/;
+	//  <let-dig-hyp> ::= <let-dig> | "-"
+	//  var letDigHyp = /[-0-9a-zA-Z]/;
+	//  <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
+	//  var ldhStr = /[-0-9a-zA-Z]+/;
+	//